class VmSerializer(VmBaseSerializer):
    """
    VM details (read-only).

    Flat, read-only view of a virtual machine; every field is either a plain
    Field or explicitly read_only, so this serializer never writes anything.
    """
    hostname = s.Field()
    uuid = s.CharField(read_only=True)
    alias = s.Field()
    # Hostname of the compute node the VM is placed on (may be unset, hence required=False)
    node = s.SlugRelatedField(slug_field='hostname', read_only=True, required=False)
    owner = s.SlugRelatedField(slug_field='username', read_only=True)
    # Human-readable VM status (display value of the Vm.STATUS choice)
    status = s.DisplayChoiceField(choices=Vm.STATUS, read_only=True)
    # Status of the VM's compute node, resolved via the node relation
    node_status = s.DisplayChoiceField(source='node.status', choices=Node.STATUS_DB, read_only=True)
    vcpus = s.IntegerField(read_only=True)
    ram = s.IntegerField(read_only=True)
    disk = s.IntegerField(read_only=True)
    ips = s.ArrayField(read_only=True)
    # Real uptime value (uptime_actual), not the raw stored counter
    uptime = s.IntegerField(source='uptime_actual', read_only=True)
    locked = s.BooleanField(read_only=True)
class DomainSerializer(s.ConditionalDCBoundSerializer):
    """
    Serializer for pdns.models.Domain (DNS domain create/update).

    Tracks a rename via ``name_changed`` (set to the previous name) and keeps
    the DC-bound value in sync through ``_normalize``.
    """
    _model_ = Domain
    _update_fields_ = ('owner', 'access', 'desc', 'dc_bound', 'type')
    _default_fields_ = ('name', 'owner', 'type')
    _blank_fields_ = frozenset({'desc'})
    name_changed = None  # holds the old domain name after a successful rename validation

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._/-]*$', max_length=253, min_length=3)
    type = s.ChoiceField(choices=Domain.TYPE_MASTER, default=Domain.MASTER)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects)
    access = s.IntegerChoiceField(choices=Domain.ACCESS, default=Domain.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, domain, *args, **kwargs):
        super(DomainSerializer, self).__init__(request, domain, *args, **kwargs)
        # For single-object (de)serialization remember the domain's DC binding
        if not kwargs.get('many', False):
            self._dc_bound = domain.dc_bound

    def _normalize(self, attr, value):
        if attr != 'dc_bound':
            # noinspection PyProtectedMember
            return super(DomainSerializer, self)._normalize(attr, value)
        # Collapse a Dc instance into its primary key before handing it out
        if isinstance(self._dc_bound, Dc):
            self._dc_bound = self._dc_bound.id
        return self._dc_bound

    def validate_name(self, attrs, source):
        if source not in attrs:
            return attrs

        # The domain name must be always lowercased (DB requirement)
        domain_name = attrs[source].lower()
        attrs[source] = domain_name  # Save lowercased domain name

        if self.object.pk:
            if self.object.name == domain_name:
                return attrs  # Name did not really change - nothing to check
            self.name_changed = self.object.name  # Save old domain name

        validate_dns_name(domain_name)

        return attrs
class IsoSerializer(s.ConditionalDCBoundSerializer):
    """
    Serializer for vms.models.Iso (ISO image create/update).

    Restricts the owner choices to owners visible to the requesting user and
    enforces the per-DC ISO image limit on creation.
    """
    _model_ = Iso
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'ostype', 'dc_bound')
    _default_fields_ = ('name', 'alias', 'owner')
    _null_fields_ = frozenset({'ostype'})

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=32)
    alias = s.SafeCharField(max_length=32)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects)
    access = s.IntegerChoiceField(choices=Iso.ACCESS, default=Iso.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    ostype = s.IntegerChoiceField(choices=Iso.OSTYPE, required=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, iso, *args, **kwargs):
        super(IsoSerializer, self).__init__(request, iso, *args, **kwargs)
        if kwargs.get('many', False):
            return
        # Single-object mode: remember DC binding and narrow the owner queryset
        self._dc_bound = iso.dc_bound
        self.fields['owner'].queryset = get_owners(request, all=True)

    def _normalize(self, attr, value):
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(IsoSerializer, self)._normalize(attr, value)

    def validate_alias(self, attrs, source):
        if source in attrs:
            validate_alias(self.object, attrs[source])
        return attrs

    def validate(self, attrs):
        # Creating a DC-bound ISO counts against the datacenter's ISO limit
        if self.request.method == 'POST' and self._dc_bound:
            limit = self._dc_bound.settings.VMS_ISO_LIMIT
            if limit is not None and Iso.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                raise s.ValidationError(_('Maximum number of ISO images reached'))

        return super(IsoSerializer, self).validate(attrs)
class NodeSerializer(s.Serializer):
    """
    Node details serializer (read-only).

    Every field is read-only; both the raw numeric status and its
    human-readable display form are exposed.
    """
    hostname = s.Field()
    address = s.Field()
    # Raw DB status value
    status = s.IntegerChoiceField(choices=Node.STATUS_DB, read_only=True)
    # Display string for the same status attribute
    node_status = s.DisplayChoiceField(source='status', choices=Node.STATUS_DB, read_only=True)
    owner = s.SlugRelatedField(slug_field='username', read_only=True)
    is_head = s.BooleanField(read_only=True)
    # Totals vs. free capacity (free values account for provisioned VMs)
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
class BackupDefineSerializer(_HideNodeSerializer):
    """
    Serializer for vms.models.BackupDefine (VM backup definition).

    Several field defaults and limits depend on the datacenter settings and
    are therefore patched per-request in __init__ (node queryset, zpool and
    compression defaults, retention bounds). The actual zpool/node pairing is
    cross-checked in validate().
    """
    _model_ = BackupDefine
    _update_fields_ = ('type', 'desc', 'node', 'zpool', 'bwlimit', 'active', 'schedule', 'retention',
                       'compression')
    _default_fields_ = ('hostname', 'name', 'disk_id')

    hostname = s.CharField(source='vm.hostname', read_only=True)
    vm_uuid = s.CharField(source='vm.uuid', read_only=True)
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=8, min_length=1)
    disk_id = s.IntegerField(source='array_disk_id', max_value=DISK_ID_MAX, min_value=DISK_ID_MIN)
    type = s.IntegerChoiceField(choices=BackupDefine.TYPE, default=BackupDefine.DATASET)
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects)  # queryset set below
    zpool = s.CharField(max_length=64)  # validated below
    desc = s.SafeCharField(max_length=128, required=False)
    bwlimit = s.IntegerField(required=False, min_value=0, max_value=2147483647)  # max_value = INT32_MAX
    active = s.BooleanField(default=True)
    schedule = s.CronField()
    retention = s.IntegerField()  # limits set below
    compression = s.IntegerChoiceField(choices=BackupDefine.COMPRESSION)
    fsfreeze = s.BooleanField(default=False)

    def __init__(self, request, instance, *args, **kwargs):
        # Pop our private kwarg before it reaches the parent serializer
        vm_template = kwargs.pop('vm_template', False)
        # Copy the class-level tuple so per-instance appends don't leak between requests
        self._update_fields_ = list(self._update_fields_)
        super(BackupDefineSerializer, self).__init__(request, instance, *args, **kwargs)

        if not kwargs.get('many', False):
            dc_settings = request.dc.settings
            backup_nodes = get_nodes(request, is_backup=True)
            # Only backup-capable nodes visible in this DC are valid targets
            self.fields['node'].queryset = backup_nodes
            self.fields['zpool'].default = dc_settings.VMS_STORAGE_DEFAULT
            self.fields['compression'].default = dc_settings.VMS_VM_BACKUP_COMPRESSION_DEFAULT

            # Set first backup node and backup node storage available in DC
            # (used only when called by VmDefineBackup.create_from_template())
            if vm_template:
                try:
                    self.fields['node'].default = first_node = backup_nodes[0]
                except IndexError:
                    # No backup node available - leave defaults untouched
                    pass
                else:
                    first_node_zpools = get_zpools(request).filter(node=first_node).values_list('zpool', flat=True)
                    if first_node_zpools and dc_settings.VMS_STORAGE_DEFAULT not in first_node_zpools:
                        self.fields['zpool'].default = first_node_zpools[0]

            # The backup type can only be chosen at create time
            if request.method != 'POST':
                self.fields['type'].read_only = True

            # Limit maximum number of backups - Issue #chili-447
            if dc_settings.VMS_VM_BACKUP_LIMIT is None:
                min_count, max_count = RETENTION_MIN, RETENTION_MAX
            else:
                min_count, max_count = 1, int(dc_settings.VMS_VM_BACKUP_LIMIT)

            self.fields['retention'].validators.append(validators.MinValueValidator(min_count))
            self.fields['retention'].validators.append(validators.MaxValueValidator(max_count))

            # fsfreeze only makes sense for KVM guests
            if instance.vm.is_kvm():
                self._update_fields_.append('fsfreeze')

    def validate(self, attrs):
        """Resolve the (node, zpool) pair into a NodeStorage object and enforce
        the per-VM backup definition limit on creation."""
        # Fall back to the current object's values when a field was not submitted
        try:
            zpool = attrs['zpool']
        except KeyError:
            zpool = self.object.zpool
        try:
            node = attrs['node']
        except KeyError:
            node = self.object.node

        # Replace the plain zpool name with the matching NodeStorage instance
        try:
            attrs['zpool'] = get_zpools(self.request).get(node=node, zpool=zpool)
        except NodeStorage.DoesNotExist:
            self._errors['zpool'] = s.ErrorList([_('Zpool does not exist on node.')])

        # Check total number of existing backup definitions - Issue #chili-447
        if self.request.method == 'POST':
            limit = self.request.dc.settings.VMS_VM_BACKUP_DEFINE_LIMIT
            if limit is not None:
                total = self._model_.objects.filter(vm=self.object.vm).count()
                if int(limit) <= total:
                    raise s.ValidationError(_('Maximum number of backup definitions reached.'))

        return attrs
class UserSerializer(ApiKeysSerializer):
    """
    Serializer for gui.models.User (user create/update).

    Enforces the protection of the built-in admin user (settings.ADMIN_USER),
    staff-only changes of dc_bound/is_super_admin, DC-bound group membership
    rules, and handles email re-verification on address change. The save()
    method is transactional.
    """
    _model_ = User
    _update_fields_ = ('email', 'first_name', 'last_name', 'is_super_admin', 'is_active', 'api_access',
                       'api_key', 'callback_key', 'groups', 'dc_bound', 'password')
    _default_fields_ = ('username', 'is_super_admin', 'is_active', 'api_access', 'password')

    username = s.RegexField(r'^[A-Za-z0-9@.+_-]*$', max_length=254)
    current_dc = s.SlugRelatedField(source='default_dc', slug_field='name', read_only=True, required=False)
    email = s.EmailField(max_length=254)
    first_name = s.SafeCharField(max_length=30)
    last_name = s.SafeCharField(max_length=30)
    # "super admin" in the API maps to Django's is_staff flag
    is_super_admin = s.BooleanField(source='is_staff')
    is_active = s.BooleanField()
    api_access = s.BooleanField()
    groups = s.ArrayField(required=False, source='roles_api')
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    created = s.DateTimeField(source='date_joined', read_only=True)
    password = s.CharField()

    old_email = None  # variable for value storage on email change
    is_staff_changed = False  # set by validate_is_super_admin() when the staff flag flips
    old_roles = ()  # previous role set, captured in save() before reassignment

    def __init__(self, request, user, *args, **kwargs):
        super(UserSerializer, self).__init__(request, user, *args, **kwargs)
        if not kwargs.get('many', False):
            self._dc_bound = user.dc_bound

    def _normalize(self, attr, value):
        # dc_bound is computed from validation state, not from the raw value
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(UserSerializer, self)._normalize(attr, value)

    # noinspection PyProtectedMember
    @atomic
    def save(self, **kwargs):
        """Persist the user and roles, then apply the email/phone verification rules."""
        user = self.object
        # True for a brand-new DB row or an object explicitly flagged as new
        new_flag = (not user.pk or getattr(user, 'new', False))
        user.save()

        if user._roles_to_save is not None:
            self.old_roles = set(user.roles.all())
            user.roles = user._roles_to_save

        # Newly created user via API is automatically marked as verified
        # Creator has to provide correct email, or in user profile set email as not verified
        # (since email is required)!
        # Email change by user will trigger email with verification code so he can finish profile!
        # If admin doesnt set phone user is force to set it and when phone is changed sms verification is send
        if new_flag:
            user.userprofile.email_verified = True
            user.userprofile.phone_verified = True
            user.userprofile.save()

        # Changing a user email makes the email not verified
        # (unless request.user is not part of the staff or registration is disabled)
        if self.old_email and not self.request.user.is_staff and settings.REGISTRATION_ENABLED:
            user.userprofile.email_verified = False
            user.userprofile.email_token = user.userprofile.generate_token(6)
            user.userprofile.save()
            sendmail(user, 'accounts/user/base/profile_verify_subject.txt',
                     'accounts/user/base/profile_verify_email.txt',
                     extra_context={'email_token': user.userprofile.email_token})

    def validate_username(self, attrs, source):
        """Forbid renaming the built-in admin and reserved usernames."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify
            elif value in INVALID_USERNAMES:
                raise s.ValidationError(s.WritableField.default_error_messages['invalid'])
        return attrs

    def validate_email(self, attrs, source):
        """Reject an email that collides (case-insensitively) with any existing
        email or username; remember the old address on change."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            qs = User.objects

            if self.object.pk:
                if self.object.email == value:
                    return attrs  # unchanged - nothing to check
                else:
                    self.old_email = self.object.email
                    qs = qs.exclude(pk=self.object.pk)

            # Check if someone does not use this email (or username) already
            if qs.filter(Q(email__iexact=value) | Q(username__iexact=value)).exists():
                raise s.ValidationError(_('This email is already in use. Please supply a different email.'))
        return attrs

    # noinspection PyMethodMayBeStatic
    def validate_groups(self, attrs, source):
        """Resolve group names to Role objects; non-staff users may only use
        groups bound to their own DC."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            groups = []

            for grp in value:
                try:
                    group = Role.objects.get(name=grp)
                except Role.DoesNotExist:
                    raise s.ObjectDoesNotExist(grp)
                else:
                    if self.request.user.is_staff:
                        groups.append(group)
                    else:
                        if group.dc_bound and self._dc_bound and group.dc_bound == self._dc_bound:
                            groups.append(group)
                        else:
                            raise s.ValidationError(_('You don\'t have permission to use DC-unbound groups.'))

            attrs[source] = groups

        return attrs

    def validate_dc_bound(self, attrs, source):
        """Only staff may flip the DC-bound flag; binding resolves the target DC
        from request data (falls back to the current request DC)."""
        try:
            value = bool(attrs[source])
        except KeyError:
            pass
        else:
            if value != self.object.dc_bound_bool:
                if not self.request.user.is_staff:
                    raise s.NoPermissionToModify

                if value:
                    data = self.init_data or {}
                    self._dc_bound = get_dc(self.request, data.get('dc', self.request.dc.name))
                else:
                    self._dc_bound = None

        return attrs

    def validate_is_super_admin(self, attrs, source):
        """Only staff may change the staff flag; the built-in admin is immutable."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.is_staff != value:
                if self.object.id == settings.ADMIN_USER:
                    raise s.NoPermissionToModify
                if self.request.user.is_staff:
                    self.is_staff_changed = self.object.is_staff != value
                else:
                    raise s.NoPermissionToModify
        return attrs

    def validate_is_active(self, attrs, source):
        """The built-in admin cannot be deactivated."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.is_active != value and self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify
        return attrs

    def validate_api_access(self, attrs, source):
        """The built-in admin's API access flag cannot be changed."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.api_access != value and self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify
        return attrs

    def validate(self, attrs):
        """Cross-field checks: a SuperAdmin cannot be DC-bound, and a DC-bound
        user's groups must not be attached to any other datacenter."""
        # User is or will be bound to this DC
        dc = self._dc_bound

        if attrs.get('dc_bound_bool', self.object.dc_bound_bool) and attrs.get('is_staff', self.object.is_staff):
            self._errors['dc_bound'] = _('A SuperAdmin user cannot be DC-bound.')

        if dc:
            # User is or will be member of these groups
            try:
                groups = attrs['roles_api']
            except KeyError:
                if self.object.pk:
                    groups = self.object.roles.all()
                else:
                    groups = ()

            # A DC-bound user cannot be a member of a group that is assigned to another DC
            # other than user.dc_bound
            if Dc.objects.filter(roles__in=groups).exclude(id=dc.id).exists():
                self._errors['dc_bound'] = s.ErrorList([_("User's group(s) are attached into another datacenter(s).")])

        return attrs

    def _setattr(self, instance, source, value):
        """Hash a new password via set_password(); all other attributes go
        through the parent setter."""
        if source == 'password':
            self.object.set_password(value)
        else:
            # noinspection PyProtectedMember
            super(UserSerializer, self)._setattr(instance, source, value)

    def detail_dict(self, **kwargs):
        """Mask sensitive values in the task-detail dictionary."""
        dd = super(UserSerializer, self).detail_dict(**kwargs)
        # Remove sensitive data from detail dict
        if 'password' in dd:
            dd['password'] = '******'
        if 'api_key' in dd:
            dd['api_key'] = '***'
        if 'callback_key' in dd:
            dd['callback_key'] = '***'
        return dd

    def to_native(self, obj):
        """Updated so we don't display password hash (keys are masked too)."""
        ret = super(UserSerializer, self).to_native(obj)
        if 'password' in ret:
            del ret['password']
        if 'api_key' in ret:
            ret['api_key'] = '***'
        if 'callback_key' in ret:
            ret['callback_key'] = '***'
        return ret
class NodeStorageSerializer(s.InstanceSerializer):
    """
    Serializer for vms.models.NodeStorage (storage attached to a compute node).

    Most writable fields proxy onto the related Storage object ('storage.*'
    sources). The zpool can only be chosen at create time (POST); afterwards
    it becomes read-only.
    """
    error_negative_resources = s.ErrorList([_('Value is too low because of existing virtual machines.')])

    _model_ = NodeStorage
    _default_fields_ = ('alias', 'owner', 'size_coef', 'zpool')

    node = s.Field(source='node.hostname')
    zpool = s.ChoiceField(source='zpool')
    alias = s.SafeCharField(source='storage.alias', max_length=32)
    owner = s.SlugRelatedField(source='storage.owner', slug_field='username', queryset=User.objects,
                               required=False)
    access = s.IntegerChoiceField(source='storage.access', choices=Storage.ACCESS, default=Storage.PRIVATE)
    type = s.IntegerChoiceField(source='storage.type', choices=Storage.TYPE, default=Storage.LOCAL)
    size = s.IntegerField(source='storage.size_total', read_only=True)
    size_coef = s.DecimalField(source='storage.size_coef', min_value=0, max_digits=4, decimal_places=2)
    size_free = s.IntegerField(source='storage.size_free', read_only=True)
    created = s.DateTimeField(source='storage.created', read_only=True, required=False)
    desc = s.SafeCharField(source='storage.desc', max_length=128, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        # Per-instance list: 'zpool' is appended only for POST requests below
        self._update_fields_ = ['alias', 'owner', 'access', 'desc', 'type', 'size_coef']
        super(NodeStorageSerializer, self).__init__(request, instance, *args, **kwargs)

        if not kwargs.get('many', False):
            # Remember the original coefficient for update_storage_resources
            self._size_coef = instance.storage.size_coef
            self.fields['owner'].queryset = get_owners(request)

            if request.method == 'POST':
                # zpool choices are limited to pools actually present on the node
                self.fields['zpool'].choices = [(i, i) for i in instance.node.zpools.keys()]
                self._update_fields_.append('zpool')
            else:
                self.fields['zpool'].read_only = True

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Storage'))
        return attrs

    def validate_alias(self, attrs, source):
        """Check alias uniqueness against the related Storage aliases."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            validate_alias(self.object, value, field_comparison='storage__alias__iexact')
        return attrs

    def validate(self, attrs):
        # Default owner is request.user, but setting this in __init__ does not work
        if 'storage.owner' in attrs and attrs['storage.owner'] is None:
            if self.object.pk:
                # Existing storage: a null owner means "leave unchanged"
                del attrs['storage.owner']
            else:
                attrs['storage.owner'] = self.request.user
        return attrs

    @property
    def update_storage_resources(self):
        """True if size_coef changed"""
        return not(self.object.storage.size_coef == self._size_coef)
class NodeDefineSerializer(s.InstanceSerializer):
    """
    Serializer for vms.models.Node (compute node definition).

    The validate_* methods have side effects beyond validation: they set
    clear_cache / status_changed / monitoring_changed flags that the view
    layer uses to trigger cache invalidation and signals after save.
    """
    error_negative_resources = s.ErrorList([_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'is_compute', 'is_backup', 'cpu_coef', 'ram_coef',
                       'monitoring_hostgroups', 'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.CharField(read_only=True)
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status', choices=Node.STATUS_DB, read_only=True)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0, max_value=1, max_digits=4, decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(max_items=16, default=[])
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args, **kwargs)
        # Flags consumed by the caller after validation/save
        self.clear_cache = False
        self.status_changed = False
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))
        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(_('Cannot change status. Please add a valid license first.'))

            if node.is_unreachable() or node.is_offline():  # Manual switch from unreachable and offline state
                if settings.DEBUG:
                    logger.warning('DEBUG mode on => skipping status checking of node %s', self.object)
                elif not node_ping(self.object, all_workers=False):  # requires that node is really online
                    raise s.ValidationError(_('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            self.status_changed = value  # holds the new status value (also truthy marker)

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))
            self.clear_cache = True
        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(_('Found existing VM backup definitions.'))
            self.clear_cache = True

        # Check existing backups when removing node
        if self.request.method == 'DELETE':
            if self.object.backup_set.exists():
                raise s.ValidationError(_('Found existing VM backups.'))
            self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True
        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True
        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        return not (self.object.cpu_coef == self._cpu_coef and self.object.ram_coef == self._ram_coef)
class VmReplicaSerializer(s.InstanceSerializer):
    """
    Serializer for vms.models.SlaveVm (VM replication slave).

    POST creates a new replica (node/root_zpool/disk_zpools are only writable
    then); PUT can only tweak replication parameters. Heavy resource checks
    are delegated to SlaveVmDefine and their errors are accumulated under the
    'node' key instead of raising.
    """
    _model_ = SlaveVm
    _default_fields_ = ('repname',)
    _update_fields_ = ('reserve_resources', 'sleep_time', 'enabled', 'bwlimit')

    hostname = s.CharField(source='master_vm.hostname', read_only=True)
    repname = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', source='name', max_length=24, min_length=1)
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, required=True)  # Updated only by POST
    root_zpool = s.CharField(max_length=64, required=False)  # Updated only by POST
    disk_zpools = DiskPoolDictField(required=False)  # Updated only by POST
    reserve_resources = s.BooleanField(default=True)  # Default value changed below during POST
    sleep_time = s.IntegerField(source='rep_sleep_time', min_value=0, max_value=86400, default=60)
    enabled = s.BooleanField(source='rep_enabled', default=True)
    bwlimit = s.IntegerField(source='rep_bwlimit', required=False, min_value=0, max_value=2147483647)
    last_sync = s.DateTimeField(read_only=True, required=False)
    reinit_required = s.BooleanField(source='rep_reinit_required', read_only=True, required=False)
    node_status = s.DisplayChoiceField(source='vm.node.status', choices=Node.STATUS_DB, read_only=True)
    created = s.DateTimeField(source="vm.created", read_only=True, required=False)

    def __init__(self, request, slave_vm, *args, **kwargs):
        self.img_required = None  # (NodeStorage, Image) tuple set by _validate_create()
        self.reserve_resources_changed = False
        self._detail_dict = {}
        super(VmReplicaSerializer, self).__init__(request, slave_vm, *args, **kwargs)

        if request.method == 'POST':
            vm = slave_vm.vm
            dc_settings = request.dc.settings
            self.fields['reserve_resources'].default = dc_settings.VMS_VM_REPLICA_RESERVATION_DEFAULT
            self.fields['node'].queryset = get_nodes(request, is_compute=True)
            self._disks = vm.json_get_disks()

            if vm.is_hvm():
                self.fields['disk_zpools'].max_items = len(self._disks)
            else:
                # Non-HVM VMs have no per-disk zpool mapping
                del self.fields['disk_zpools']
        else:
            # Placement can not be changed after creation
            self.fields['node'].required = False
            self.fields['node'].read_only = True
            self.fields['root_zpool'].read_only = True
            self.fields['disk_zpools'].read_only = True

    def validate_disk_zpools(self, attrs, source):
        """Basic disk_zpools validation (POST only)"""
        disk_zpools = attrs.get(source, None)

        if disk_zpools:
            # Disk IDs are 1-based indexes into the VM's disk array
            if max(disk_zpools.keys()) > len(self._disks):
                raise s.ValidationError(_('Invalid disk_id.'))

        return attrs

    def validate_node(self, attrs, source):
        """Basic node validation (POST only)"""
        try:
            node = attrs[source]
        except KeyError:
            return attrs

        if node == self.object.node:
            raise s.ValidationError(_('Target node is the same as current node.'))

        if node.status != Node.ONLINE:
            raise s.ValidationError(_('Target node is not in online state.'))

        # Check nic tags
        try:
            validate_nic_tags(self.object.vm, new_node=node)
        except s.ValidationError:
            raise s.ValidationError(_('Some networks are not available on target node.'))

        return attrs

    def _validate_create(self, attrs):
        """Validate node storage zpools, resources, ... and create slave VM (POST only)"""
        node = attrs['node']
        self._detail_dict['node'] = node.hostname
        slave_vm = self.object
        slave_vm.set_rep_hostname()
        slave_vm.node = node
        slave_vm.reserve_resources = attrs.get('reserve_resources', True)
        slave_vm_define = SlaveVmDefine(slave_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        try:
            root_zpool = slave_vm_define.save_root_zpool(root_zpool)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        else:
            if root_zpool:
                self._detail_dict['root_zpool'] = root_zpool

        # Validate disk_zpools (we can do this after we know the new node)
        if slave_vm.vm.is_hvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = slave_vm_define.save_disk_zpools(disk_zpools)
            except s.APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return False
            else:
                if disk_zpools:
                    self._detail_dict['disk_zpools'] = disk_zpools

        # Validate dc_node resources
        try:
            slave_vm_define.validate_node_resources(ignore_cpu_ram=not slave_vm.reserve_resources)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate storage resources
        try:
            slave_vm_define.validate_storage_resources()
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate images
        self.img_required = slave_vm_define.check_required_images()

        # noinspection PyAttributeOutsideInit
        self.slave_vm_define = slave_vm_define

        return True

    def _validate_update(self, attrs):
        """Validate node resources if reserve_resources changed to True"""
        try:
            reserve_resource = attrs['reserve_resources']
        except KeyError:
            pass
        else:
            # We need to know whether the user requested change of the reserve_resources attribute
            self.reserve_resources_changed = reserve_resource != self.object.reserve_resources

            if self.reserve_resources_changed and reserve_resource:
                slave_vm_define = SlaveVmDefine(self.object)
                try:
                    slave_vm_define.validate_node_resources(ignore_cpu_ram=False, ignore_disk=True)
                except s.APIValidationError as exc:
                    self._errors['node'] = exc.api_errors
                    return False

        return True

    def validate(self, attrs):
        if self.object.rep_reinit_required:
            raise s.ValidationError(_('Server replica requires re-initialization.'))

        if self.request.method == 'POST':
            # Next rep_id = number of existing named replicas of this master + 1
            total = SlaveVm.objects.filter(master_vm=self.object.master_vm).exclude(name=u'').count()
            self.object.rep_id = total + 1
            limit = self.request.dc.settings.VMS_VM_REPLICA_LIMIT

            if limit is not None:
                if int(limit) <= total:
                    raise s.ValidationError(_('Maximum number of server replicas reached.'))

            self._validate_create(attrs)
        else:  # PUT
            self._validate_update(attrs)

        return attrs

    def save_slave_vm(self):
        """Initial saving of slave VM - used only by POST vm_replica"""
        # The only difference between a slave and master VM should be the hostname
        # hence we change the slave hostname temporarily to the real hostname
        # for the purpose of sync_json()
        slave_vm = self.object
        hostname = slave_vm.vm.hostname
        slave_vm.vm.hostname = slave_vm.master_vm.hostname
        slave_vm.vm.choose_vnc_port()
        slave_vm.vm.sync_json()
        slave_vm.vm.hostname = hostname
        # We also don't want to save the replication state
        # (which can be only updated by vm_replica_cb)
        sync_status = slave_vm.sync_status
        slave_vm.sync_status = SlaveVm.DIS
        self.slave_vm_define.save()
        slave_vm.sync_status = sync_status

        return self.slave_vm_define.slave_vm

    def node_image_import(self):
        """Start an image import onto the target node storage if one is required."""
        if self.img_required:
            ns, img = self.img_required
            return NodeImageView.import_for_vm(self.request, ns, img, self.object)
        return None

    def detail_dict(self, **kwargs):
        # noinspection PyStatementEffect
        self.data  # force full serialization so _detail_dict is populated
        dd = super(VmReplicaSerializer, self).detail_dict(**kwargs)
        dd.update(self._detail_dict)
        dd['repname'] = self.object.name

        return dd
class NetworkSerializer(s.InstanceSerializer):
    """
    Serializer for vms.models.Subnet (virtual network create/update).

    Validates the IPv4 network/netmask pair, VLAN ID collisions with IPs in
    other networks, the optional reverse (PTR) domain, the per-DC network
    limit and the DC VLAN restriction list.
    """
    _model_ = Subnet
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'network', 'netmask', 'gateway',
                       'resolvers', 'dns_domain', 'ptr_domain', 'nic_tag', 'vlan_id', 'dc_bound',
                       'dhcp_passthrough')
    _default_fields_ = ('name', 'alias', 'owner')
    _blank_fields_ = frozenset({'desc', 'dns_domain', 'ptr_domain'})
    _null_fields_ = frozenset({'gateway'})

    # min_length because of API URL: /network/ip/
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', min_length=3, max_length=32)
    uuid = s.CharField(read_only=True)
    alias = s.SafeCharField(max_length=32)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, required=False)
    access = s.IntegerChoiceField(choices=Subnet.ACCESS, default=Subnet.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    network = s.IPAddressField()
    netmask = s.IPAddressField()
    gateway = s.IPAddressField(required=False)  # can be null
    nic_tag = s.ChoiceField()
    # 802.1Q VLAN ID is a 12-bit field => 4095 is the highest encodable value
    # (was max_value=4096, which cannot exist on the wire)
    vlan_id = s.IntegerField(min_value=0, max_value=4095)
    resolvers = s.IPAddressArrayField(source='resolvers_api', required=False, max_items=8)
    dns_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    ptr_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    dhcp_passthrough = s.BooleanField(default=False)
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, net, *args, **kwargs):
        super(NetworkSerializer, self).__init__(request, net, *args, **kwargs)

        if not kwargs.get('many', False):
            self._dc_bound = net.dc_bound
            self.fields['owner'].queryset = get_owners(request, all=True)
            # Valid NIC tags are defined globally in the default DC settings
            self.fields['nic_tag'].choices = [(i, i) for i in DefaultDc().settings.VMS_NET_NIC_TAGS]

    def _normalize(self, attr, value):
        # dc_bound is driven by validation state, not by the raw input value
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(NetworkSerializer, self)._normalize(attr, value)

    def validate_dc_bound(self, attrs, source):
        """Only an allowed DC re-binding may update the internal _dc_bound state."""
        try:
            value = bool(attrs[source])
        except KeyError:
            pass
        else:
            if value != self.object.dc_bound_bool:
                self._dc_bound = validate_dc_bound(self.request, self.object, value, _('Network'))

        return attrs

    def validate_alias(self, attrs, source):
        """Check alias uniqueness."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            validate_alias(self.object, value)

        return attrs

    def validate_vlan_id(self, attrs, source):
        """Reject a VLAN ID change that would duplicate existing IP addresses
        in another network with the same VLAN ID."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            net = self.object

            if not net.new:
                # TODO: Cannot use ip__in=net_ips (ProgrammingError)
                net_ips = set(net.ipaddress_set.all().values_list('ip', flat=True))
                other_ips = set(IPAddress.objects.exclude(subnet=net)
                                                 .filter(subnet__vlan_id=int(value))
                                                 .values_list('ip', flat=True))

                if net_ips.intersection(other_ips):
                    raise s.ValidationError(_('Network has IP addresses that already exist in another '
                                              'network with the same VLAN ID.'))

        return attrs

    # noinspection PyMethodMayBeStatic
    def validate_ptr_domain(self, attrs, source):
        """A PTR domain must live under in-addr.arpa and (when DNS is enabled)
        must already exist."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if value:
                if not value.endswith('in-addr.arpa'):
                    raise s.ValidationError(_('Invalid PTR domain name.'))

                if settings.DNS_ENABLED:
                    if not Domain.objects.filter(name=value).exists():
                        raise s.ObjectDoesNotExist(value)

        return attrs

    def validate(self, attrs):
        """Cross-field checks: network/netmask sanity, per-DC network limit and
        the DC VLAN restriction list."""
        try:
            network = attrs['network']
        except KeyError:
            network = self.object.network
        try:
            netmask = attrs['netmask']
        except KeyError:
            netmask = self.object.netmask

        try:
            ip_network = Subnet.get_ip_network(network, netmask)
            if ip_network.is_reserved:  # reserved ranges are treated as invalid input
                raise ValueError
        except ValueError:
            self._errors['network'] = self._errors['netmask'] = \
                s.ErrorList([_('Enter a valid IPv4 network and netmask.')])

        if self.request.method == 'POST' and self._dc_bound:
            limit = self._dc_bound.settings.VMS_NET_LIMIT

            if limit is not None:
                if Subnet.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                    raise s.ValidationError(_('Maximum number of networks reached'))

        if self._dc_bound:
            try:
                vlan_id = attrs['vlan_id']
            except KeyError:
                vlan_id = self.object.vlan_id

            dc_settings = self._dc_bound.settings

            if dc_settings.VMS_NET_VLAN_RESTRICT and vlan_id not in dc_settings.VMS_NET_VLAN_ALLOWED:
                self._errors['vlan_id'] = s.ErrorList([_('VLAN ID is not available in datacenter.')])

        return attrs

    # noinspection PyMethodMayBeStatic
    def update_errors(self, fields, err_msg):
        """Build an error dict mapping each field name to the same message."""
        return {field: s.ErrorList([err_msg]) for field in fields}
class NodeDefineSerializer(s.InstanceSerializer):
    """
    vms.models.Node

    Besides plain field validation this serializer records *what* changed
    (status, address, monitoring config, coefficients) in instance flags so
    the view can trigger the appropriate follow-up actions/signals after save.
    """
    error_negative_resources = s.ErrorList([_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'address', 'is_compute', 'is_backup', 'note',
                       'cpu_coef', 'ram_coef', 'monitoring_hostgroups', 'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.ChoiceField()  # choices are filled per-instance in __init__ from node.ips
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status', choices=Node.STATUS_DB, read_only=True)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    note = s.CharField(required=False)
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0, max_value=1, max_digits=4, decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(max_items=16, default=[],
                                         validators=(RegexValidator(
                                             regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS),))
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args, **kwargs)
        # Change-tracking flags consumed by the view after save()
        self.clear_cache = False
        self.status_changed = False
        self.address_changed = False
        self.old_ip_address = None
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Valid node IP addresses
            self.fields['address'].choices = [(ip, ip) for ip in instance.ips]
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))

        return attrs

    def validate_address(self, attrs, source):
        """Mark that node IP address is going to change"""
        new_address = attrs.get(source, None)

        if new_address and self.object.address != new_address:
            self.address_changed = True
            # Remember the current IPAddress object (if any) so it can be released later
            try:
                self.old_ip_address = self.object.ip_address
            except ObjectDoesNotExist:
                self.old_ip_address = None

        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(_('Cannot change status. Please add a valid license first.'))

            if node.is_unreachable() or node.is_offline():  # Manual switch from unreachable and offline state
                if settings.DEBUG:
                    logger.warning('DEBUG mode on => skipping status checking of node %s', self.object)
                elif not node_ping(self.object, all_workers=False):  # requires that node is really online
                    raise s.ValidationError(_('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            self.status_changed = value  # stores the *new* status value, not just a boolean

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))

            self.clear_cache = True

        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(_('Found existing VM backup definitions.'))

            self.clear_cache = True

        # Check existing backups when removing node
        if self.request.method == 'DELETE':
            if self.object.backup_set.exists():
                raise s.ValidationError(_('Found existing VM backups.'))

            self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True

        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True

        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        return not (self.object.cpu_coef == self._cpu_coef and self.object.ram_coef == self._ram_coef)

    def save(self):
        """Update compute node attributes in database.

        Returns None on success, or a field->ErrorList dict when the new
        coefficients would make free resources negative (the transaction is
        rolled back in that case). Unrelated IntegrityErrors are re-raised.
        """
        node = self.object

        # NOTE:
        # Changing cpu or disk coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free
        try:
            with transaction.atomic():
                node.save(update_resources=self.update_node_resources, clear_cache=self.clear_cache)

                if self.update_node_resources:
                    # Raising inside the atomic block aborts the whole save
                    if node.cpu_free < 0 or node.dcnode_set.filter(cpu_free__lt=0).exists():
                        raise IntegrityError('cpu_check')

                    if node.ram_free < 0 or node.dcnode_set.filter(ram_free__lt=0).exists():
                        raise IntegrityError('ram_check')

        except IntegrityError as exc:
            errors = {}
            exc_error = str(exc)
            # ram or cpu constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
            # OR an exception was raised above
            if 'ram_check' in exc_error:
                errors['ram_coef'] = self.error_negative_resources

            if 'cpu_check' in exc_error:
                errors['cpu_coef'] = self.error_negative_resources

            if not errors:
                raise exc

            return errors

        if self.update_node_resources:  # cpu_free or ram_free changed
            self.reload()

        return None
class VmMigrateSerializer(s.Serializer):
    """Validate a VM migration request (target node and/or new zpools).

    Validation builds a "ghost" SlaveVm copy of the VM, which reserves the
    target resources in the DB; the computed parameters are later used to
    build the esmigrate shell command.
    """
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, required=False)
    root_zpool = s.CharField(max_length=64, required=False)
    disk_zpools = DiskPoolDictField(required=False)

    def __init__(self, request, vm, *args, **kwargs):
        self.img_required = None
        self.request = request
        self.vm = vm
        super(VmMigrateSerializer, self).__init__(*args, **kwargs)
        self.fields['node'].queryset = get_nodes(request, is_compute=True)
        self._disks = vm.json_active_get_disks()

        if vm.is_kvm():
            self.fields['disk_zpools'].max_items = len(self._disks)
        else:
            # Per-disk zpool mapping only makes sense for KVM VMs
            del self.fields['disk_zpools']

    def validate_disk_zpools(self, attrs, source):
        """Reject disk IDs beyond the VM's actual disk count (disk IDs are 1-based here)."""
        disk_zpools = attrs.get(source, None)

        if disk_zpools:
            if max(disk_zpools.keys()) > len(self._disks):
                raise s.ValidationError(_('Invalid disk_id.'))

        return attrs

    def validate_node(self, attrs, source):
        """Basic node validation"""
        node = attrs.get(source, None)

        if not node:
            attrs.pop(source, None)
            return attrs

        vm = self.vm

        if node == vm.node:
            raise s.ValidationError(_('Target node is the same as current node.'))

        if node.status != Node.ONLINE:
            raise s.ValidationError(_('Target node is not in online state.'))

        # Check nic tags
        try:
            validate_nic_tags(vm, new_node=node)
        except s.ValidationError:
            raise s.ValidationError(_('Some networks are not available on target node.'))

        return attrs

    def validate(self, attrs):
        """Cross-field validation via a ghost VM that reserves target resources.

        Each failed sub-validation stores errors under 'node' and returns
        early; on success the computed migration parameters are stashed on
        the serializer instance for later use.
        """
        vm = self.vm
        node = attrs.get('node', vm.node)
        changing_node = attrs.get('node', vm.node) != vm.node
        # Ghost VM is a copy of a VM used to take up place in DB.
        # When node is changing we have to have all disks in a ghost VM.
        # When changing only disk pools, only the changed disks have to be in a ghost VM.
        ghost_vm = SlaveVm(_master_vm=vm)
        ghost_vm.reserve_resources = changing_node
        ghost_vm.set_migration_hostname()
        ghost_vm.node = node
        ghost_vm_define = SlaveVmDefine(ghost_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        # Every pool must be validated when changing node
        try:
            root_zpool = ghost_vm_define.save_root_zpool(root_zpool, save_same_zpool=changing_node)
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate disk_zpools (we can do this after we know the new node)
        if ghost_vm.vm.is_kvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = ghost_vm_define.save_disk_zpools(disk_zpools, save_same_zpool=changing_node)
            except APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return attrs
        else:
            disk_zpools = {}

        # Nothing changed, he?
        if not changing_node and not (root_zpool or disk_zpools):
            raise s.ValidationError(_('Nothing to do.'))

        # Validate dc_node resources
        try:
            ghost_vm_define.validate_node_resources(ignore_cpu_ram=not changing_node)
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate storage resources
        try:
            ghost_vm_define.validate_storage_resources()
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate images
        self.img_required = ghost_vm_define.check_required_images()

        # Save params
        # noinspection PyAttributeOutsideInit
        self._root_zpool = root_zpool
        # noinspection PyAttributeOutsideInit
        self._disk_zpools = disk_zpools
        # noinspection PyAttributeOutsideInit
        self.ghost_vm_define = ghost_vm_define
        # noinspection PyAttributeOutsideInit
        self.changing_node = changing_node

        return attrs

    def save_ghost_vm(self):
        """Persist the ghost (slave) VM created during validate()."""
        self.ghost_vm_define.save()
        return self.ghost_vm_define.slave_vm

    def node_image_import(self):
        """Start import of a missing image onto the target node, if required."""
        if self.img_required:
            ns, img = self.img_required
            return NodeImageView.import_for_vm(self.request, ns, img, self.vm)

        return None

    @property
    def esmigrate_cmd(self):
        """Create esmigrate command"""
        vm = self.vm
        get_json = 'vmadm get %s 2>/dev/null' % vm.uuid
        params = []

        if self.changing_node:
            node = self.object['node']
            params.append('-H %s' % node.address)
            # After migration the VM JSON is fetched from the *target* node over ssh
            ssh = 'ssh -o ConnectTimeout=10 -o BatchMode=yes -o StrictHostKeyChecking=no ' \
                  '-o GSSAPIKeyExchange=no -o GSSAPIAuthentication=no -o LogLevel=QUIET -l root'
            get_json = '%s %s "%s"' % (ssh, node.address, get_json)

        if self._root_zpool:
            params.append('-p %s' % self._root_zpool)

        if self._disk_zpools:
            for n, zpool in self._disk_zpools.items():
                # Convert 1-based API disk_id to the 0-based option number used by esmigrate
                n -= 1
                params.append('-%s %s' % (n, zpool))

        return 'esmigrate migrate %s %s >&2; ' % (vm.uuid, ' '.join(params)) + get_json

    def detail_dict(self, **kwargs):
        """Return a dict of migration parameters for task detail/audit output."""
        dd = {}

        if self.changing_node:
            dd['node'] = self.object['node'].hostname

        if self._root_zpool:
            dd['root_zpool'] = self._root_zpool

        if self._disk_zpools:
            dd['disk_zpools'] = self._disk_zpools

        return dd
class VmDcSerializer(s.Serializer):
    """
    Validate target DC for VM.

    Checks that every resource the VM depends on (node, storages, domain,
    template, images, networks, backup nodes/storages, free resources) is
    available in the target datacenter; the first failing check raises.
    """
    target_dc = s.SlugRelatedField(slug_field='name', queryset=Dc.objects)

    def __init__(self, request, vm, *args, **kwargs):
        self.request = request
        self.vm = vm
        self.dc = None   # set to the validated target Dc by validate_target_dc()
        self.nss = None  # set to the VM's node storages by validate_target_dc()
        super(VmDcSerializer, self).__init__(*args, **kwargs)
        self.fields['target_dc'].queryset = get_dcs(request)

    def validate_target_dc(self, attrs, source):
        new_dc = attrs.get(source, None)

        if not new_dc:
            return attrs

        vm = self.vm

        if not vm.node:
            raise s.ValidationError(_('VM has no compute node assigned.'))

        if vm.dc == new_dc:
            raise s.ValidationError(_('Target datacenter is the same as current datacenter.'))

        # Check node
        try:
            new_dc_node = vm.node.get_dc_node(new_dc)
            old_dc_node = vm.node.get_dc_node(vm.dc)
        except ObjectDoesNotExist:
            raise s.ValidationError(_('VM compute node is not available in target datacenter.'))

        # Check node storages
        self.nss = nss = vm.get_node_storages()

        for ns in nss:
            if not ns.dc.filter(pk=new_dc.pk).exists():
                raise s.ValidationError(_('VM disk storages are not available in target datacenter.'))

        # Check domain
        if vm.hostname_is_valid_fqdn():
            try:
                new_dc.domaindc_set.get(domain_id=Domain.get_domain_id(vm.fqdn_domain))
            except ObjectDoesNotExist:
                raise s.ValidationError(_('VM domain is not available in target datacenter.'))

        # Check templates
        if vm.template and not new_dc.vmtemplate_set.filter(id=vm.template.id).exists():
            raise s.ValidationError(_('VM template is not available in target datacenter.'))

        # Check images (both configured and currently active disks)
        vm_disks = vm.json_get_disks() + vm.json_active_get_disks()
        vm_images = set([dsk['image_uuid'] for dsk in vm_disks if 'image_uuid' in dsk])

        if vm_images and new_dc.image_set.filter(uuid__in=vm_images).distinct().count() != len(vm_images):
            raise s.ValidationError(_('VM disk image is not available in target datacenter.'))

        # Check networks (both configured and currently active NICs)
        vm_nics = vm.json_get_nics() + vm.json_active_get_nics()
        vm_networks = set([nic['network_uuid'] for nic in vm_nics if 'network_uuid' in nic])

        if vm_networks and new_dc.subnet_set.filter(uuid__in=vm_networks).distinct().count() != len(vm_networks):
            raise s.ValidationError(_('VM NIC networks are not available in target datacenter.'))

        # Check backup definition nodes and storages (pools)
        vm_bkpdefs = vm.backupdefine_set.all()
        vm_bkp_nodes = set([bd.node.uuid for bd in vm_bkpdefs])
        vm_bkp_zpools = set([bd.zpool.id for bd in vm_bkpdefs])

        if vm_bkp_nodes:
            new_dc_bkp_nodes_count = new_dc.node_set.filter(uuid__in=vm_bkp_nodes).values_list('uuid', flat=True)\
                                                    .distinct().count()

            if new_dc_bkp_nodes_count != len(vm_bkp_nodes):
                raise s.ValidationError(_('VM backup node is not available in target datacenter.'))

            new_dc_bkp_zpools_count = new_dc.nodestorage_set.filter(id__in=vm_bkp_zpools).values_list('id', flat=True)\
                                                            .distinct().count()

            if new_dc_bkp_zpools_count != len(vm_bkp_zpools):
                raise s.ValidationError(_('VM backup storage is not available in target datacenter.'))

        # Check free resources only when dealing with RESERVED strategies on target or source DcNode
        if new_dc_node.strategy == DcNode.RESERVED or old_dc_node.strategy == DcNode.RESERVED:
            vm_resources = vm.get_cpu_ram_disk(zpool=vm.node.zpool, ram_overhead=True)

            if not new_dc_node.check_free_resources(*vm_resources):
                raise s.ValidationError(_('Not enough free compute node resources in target datacenter.'))

        # Save new DC
        self.dc = new_dc

        return attrs
class TemplateSerializer(s.InstanceSerializer):
    """
    vms.models.Template

    Read/write serializer for server templates, including the optional
    vm_define_* payloads and the dc_bound flag handled by _normalize().
    """
    _model_ = VmTemplate
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'ostype', 'dc_bound', 'vm_define',
                       'vm_define_disk', 'vm_define_nic', 'vm_define_snapshot', 'vm_define_backup')
    _default_fields_ = ('name', 'alias', 'owner')
    _null_fields_ = frozenset({'ostype', 'vm_define', 'vm_define_disk', 'vm_define_nic',
                               'vm_define_snapshot', 'vm_define_backup'})

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=32)
    alias = s.SafeCharField(max_length=32)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, required=False)
    access = s.IntegerChoiceField(choices=VmTemplate.ACCESS, default=VmTemplate.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    ostype = s.IntegerChoiceField(choices=VmTemplate.OSTYPE, required=False, default=None)
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    vm_define = VmDefineField(default={}, required=False)
    vm_define_disk = VmDefineDiskField(default=[], required=False, max_items=2)
    vm_define_nic = VmDefineNicField(default=[], required=False, max_items=4)
    vm_define_snapshot = VmDefineSnapshotField(default=[], required=False, max_items=16)
    vm_define_backup = VmDefineBackupField(default=[], required=False, max_items=16)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, tmp, *args, **kwargs):
        super(TemplateSerializer, self).__init__(request, tmp, *args, **kwargs)

        if kwargs.get('many', False):
            return

        # Single-object mode: cache the current DC binding and restrict
        # the owner choices to users visible to the requester.
        self._dc_bound = tmp.dc_bound
        self.fields['owner'].queryset = get_owners(request, all=True)

    def _normalize(self, attr, value):
        # dc_bound is materialized from the Dc object cached by validate_dc_bound()
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(TemplateSerializer, self)._normalize(attr, value)

    def validate_dc_bound(self, attrs, source):
        """Resolve a dc_bound change (permission check included) into a Dc object."""
        if source in attrs:
            requested = bool(attrs[source])

            if requested != self.object.dc_bound_bool:
                self._dc_bound = validate_dc_bound(self.request, self.object, requested, _('Template'))

        return attrs

    def validate_alias(self, attrs, source):
        """Run the shared alias validator when an alias was submitted."""
        if source in attrs:
            validate_alias(self.object, attrs[source])

        return attrs

    def validate(self, attrs):
        """Enforce the per-datacenter template quota when creating a DC-bound template."""
        creating = self.request.method == 'POST'

        if creating and self._dc_bound:
            limit = self._dc_bound.settings.VMS_TEMPLATE_LIMIT

            if limit is not None and VmTemplate.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                raise s.ValidationError(_('Maximum number of server templates reached'))

        return attrs
class ImageSerializer(s.InstanceSerializer):
    """
    vms.models.Image
    Also used in api.dc.image.serializers.
    """
    # Maps serializer attribute names to the model attribute names used when
    # snapshotting current values in create_img_backup()
    _backup_attrs_map_ = {
        'owner': 'owner_id',
        'dc_bound': 'dc_bound_id',
    }
    _model_ = Image
    _update_fields_ = ('alias', 'version', 'dc_bound', 'owner', 'access', 'desc', 'resize',
                       'deploy', 'tags')  # TODO: 'nic_model', 'disk_model'
    _default_fields_ = ('name', 'alias', 'owner')

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=32)
    uuid = s.CharField(read_only=True)
    alias = s.SafeCharField(max_length=32)
    version = s.SafeCharField(max_length=16, default='1.0')
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects)
    access = s.IntegerChoiceField(choices=Image.ACCESS, default=Image.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    ostype = s.IntegerChoiceField(choices=Image.OSTYPE, read_only=True)
    size = s.IntegerField(read_only=True)
    resize = s.BooleanField(default=False)
    deploy = s.BooleanField(default=False)
    # nic_model = s.ChoiceField(choices=Vm.NIC_MODEL)  # KVM only
    # disk_model = s.ChoiceField(choices=Vm.DISK_MODEL)  # KVM only
    tags = s.TagField(required=False, default=[])
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    status = s.IntegerChoiceField(choices=Image.STATUS, read_only=True, required=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, img, *args, **kwargs):
        super(ImageSerializer, self).__init__(request, img, *args, **kwargs)

        if not kwargs.get('many', False):
            # Whether the on-disk image manifest has to be rewritten after
            # this update; cleared in validate() for DB-only changes
            self.update_manifest = True
            self._dc_bound = img.dc_bound
            self.fields['owner'].queryset = get_owners(request, all=True)

    def create_img_backup(self):
        """Creates a dictionary that maps Image object attributes to its values;
        this will be used as a backup in case the update should fail"""
        items = self._backup_attrs_map_
        return {items.get(attr, attr): getattr(self.object, items.get(attr, attr))
                for attr in self._update_fields_}

    def _normalize(self, attr, value):
        # dc_bound is materialized from the Dc object cached by validate_dc_bound()
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(ImageSerializer, self)._normalize(attr, value)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Image'))

        return attrs

    def validate_dc_bound(self, attrs, source):
        """Resolve a dc_bound change (permission check included) into a Dc object."""
        try:
            value = bool(attrs[source])
        except KeyError:
            pass
        else:
            if value != self.object.dc_bound_bool:
                self._dc_bound = validate_dc_bound(self.request, self.object, value, _('Image'))

        return attrs

    def validate(self, attrs):
        """Object-level checks: manifest-update flag, unique alias+version,
        and the per-datacenter image quota on create."""
        # If only these attributes change, the image manifest stays untouched
        db_only_manifest_keys = {'dc_bound', 'dc_bound_bool', 'owner'}

        if db_only_manifest_keys.issuperset(attrs.keys()):
            self.update_manifest = False

        try:
            alias = attrs['alias']
        except KeyError:
            alias = self.object.alias

        try:
            version = attrs['version']
        except KeyError:
            version = self.object.version

        # alias+version must be unique among all other images (case-insensitive alias)
        qs = Image.objects

        if self.object.pk:
            qs = qs.exclude(pk=self.object.pk)

        if qs.filter(alias__iexact=alias, version=version).exists():
            self._errors['alias'] = s.ErrorList([_('This alias is already in use. '
                                                   'Please supply a different alias or version.')])

        if self.request.method == 'POST' and self._dc_bound:
            limit = self._dc_bound.settings.VMS_IMAGE_LIMIT

            if limit is not None:
                if Image.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                    raise s.ValidationError(_('Maximum number of server disk images reached'))

        return attrs
class DcSerializer(s.InstanceSerializer):
    """
    vms.models.Dc

    Tracks owner and group membership changes in instance attributes so the
    view can propagate permission/relationship updates after save.
    """
    _model_ = Dc
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'site', 'groups')
    _default_fields_ = ('name', 'alias', 'owner', 'site')

    # Change-tracking attributes filled during validation
    owner_changed = None    # previous owner when the owner is being replaced
    groups_changed = None   # symmetric difference of old/new role sets
    groups_added = None
    groups_removed = None
    removed_users = None    # users who lose access via removed groups

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=16)
    alias = s.SafeCharField(max_length=32)
    site = s.RegexField(r'^[a-z0-9][a-z0-9\.:-]+[a-z0-9]$', max_length=260, min_length=1)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False, required=False)
    access = s.IntegerChoiceField(choices=Dc.ACCESS, default=Dc.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(DcSerializer, self).__init__(request, instance, *args, **kwargs)

        if not kwargs.get('many', False):
            self.fields['owner'].default = request.user.username  # Does not work
            self.fields['owner'].queryset = get_owners(request, all=True)

    def validate_alias(self, attrs, source):
        """Run the shared alias validator when an alias was submitted."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            validate_alias(self.object, value)

        return attrs

    def validate_site(self, attrs, source):
        """Site hostname must be unique across datacenters (case-insensitive)."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.pk and self.object.site == value:
                pass  # unchanged -> nothing to check
            elif Dc.objects.filter(site__iexact=value).exists():
                raise s.ValidationError(_('This site hostname is already in use. '
                                          'Please supply a different site hostname.'))

        return attrs

    def validate_access(self, attrs, source):
        """The default datacenter cannot be made non-public."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.pk and self.object.is_default() and int(value) != Dc.PUBLIC:
                raise s.ValidationError(_('Default datacenter must be public.'))

        return attrs

    def validate_owner(self, attrs, source):
        """Validate an owner change and remember the previous owner.

        A null owner means: keep the current owner on update, or default to
        the requesting user on create.
        """
        try:
            user = attrs[source]
        except KeyError:
            pass
        else:
            if user is None:
                if self.object.pk:
                    del attrs['owner']  # update: drop the attr -> owner stays unchanged
                else:
                    attrs['owner'] = self.request.user  # create: requester becomes owner
            elif self.object.pk:
                if self.object.is_default() and not user.is_staff:
                    raise s.ValidationError(_('Default datacenter must be owned by user with SuperAdmin rights.'))

                if user != self.object.owner:
                    self.owner_changed = self.object.owner  # Save old owner

                # Cannot change owner while pending tasks exist
                validate_owner(self.object, user, _('Datacenter'))

        return attrs

    def validate_groups(self, attrs, source):
        """Record which roles were added/removed and which users lose access."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.pk:
                current_roles = set(self.object.roles.all())
                new_roles = set(value)

                if current_roles != new_roles:
                    self.groups_added = new_roles - current_roles
                    self.groups_removed = current_roles - new_roles
                    self.groups_changed = current_roles.symmetric_difference(new_roles)
                    self.removed_users = User.objects.distinct().filter(roles__in=self.groups_removed)

        return attrs
class NetworkSerializer(s.ConditionalDCBoundSerializer):
    """
    vms.models.Subnet

    Validates IPv4 network definitions including NIC-tag-dependent rules:
    overlay NIC tags require a VXLAN ID and get MTU defaulting/capping,
    while physical/aggregation NIC tags get a 1500-byte MTU floor.
    """
    _model_ = Subnet
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'network', 'netmask', 'gateway',
                       'resolvers', 'dns_domain', 'ptr_domain', 'nic_tag', 'vlan_id', 'dc_bound',
                       'dhcp_passthrough', 'vxlan_id', 'mtu')
    _default_fields_ = ('name', 'alias', 'owner')
    _blank_fields_ = frozenset({'desc', 'dns_domain', 'ptr_domain'})
    _null_fields_ = frozenset({'gateway'})

    # min_length because of API URL: /network/ip/
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', min_length=3, max_length=32)
    uuid = s.CharField(read_only=True)
    alias = s.SafeCharField(max_length=32)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, required=False)
    access = s.IntegerChoiceField(choices=Subnet.ACCESS, default=Subnet.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    network = s.IPAddressField()
    netmask = s.IPAddressField()
    gateway = s.IPAddressField(required=False)  # can be null
    nic_tag = s.ChoiceField()  # choices are filled per-request in __init__
    nic_tag_type = s.CharField(read_only=True)
    # NOTE(review): valid 802.1Q VLAN IDs end at 4095; max_value=4096 kept for
    # backward compatibility - confirm before tightening
    vlan_id = s.IntegerField(min_value=0, max_value=4096)
    vxlan_id = s.IntegerField(min_value=1, max_value=16777215, required=False)  # (2**24 - 1) based on RFC 7348
    mtu = s.IntegerField(min_value=576, max_value=9000, required=False)  # values from man vmadm
    resolvers = s.IPAddressArrayField(source='resolvers_api', required=False, max_items=8)
    dns_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    ptr_domain = s.RegexField(r'^[A-Za-z0-9][/A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    dhcp_passthrough = s.BooleanField(default=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, net, *args, **kwargs):
        super(NetworkSerializer, self).__init__(request, net, *args, **kwargs)

        if not kwargs.get('many', False):
            self._dc_bound = net.dc_bound
            self.fields['owner'].queryset = get_owners(request, all=True)
            self.fields['nic_tag'].choices = Node.all_nictags_choices()

    def _normalize(self, attr, value):
        # dc_bound is materialized from the Dc object cached by validate_dc_bound()
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(NetworkSerializer, self)._normalize(attr, value)

    def validate_alias(self, attrs, source):
        """Run the shared alias validator when an alias was submitted."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            validate_alias(self.object, value)

        return attrs

    def validate_vlan_id(self, attrs, source):
        """Reject a VLAN ID change that would create duplicate IPs within one VLAN.

        Skipped for a brand new network (net.new), which has no IPs yet.
        """
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            net = self.object

            if not net.new:
                # TODO: Cannot use ip__in=net_ips (ProgrammingError)
                net_ips = set(net.ipaddress_set.all().values_list('ip', flat=True))
                other_ips = set(IPAddress.objects.exclude(subnet=net)
                                                 .filter(subnet__vlan_id=int(value))
                                                 .values_list('ip', flat=True))

                if net_ips.intersection(other_ips):
                    raise s.ValidationError(_('Network has IP addresses that already exist in another '
                                              'network with the same VLAN ID.'))

        return attrs

    # noinspection PyMethodMayBeStatic
    def validate_ptr_domain(self, attrs, source):
        """Require a reverse (in-addr.arpa) domain that exists in the DNS DB
        (existence only checked when DNS support is enabled)."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if value:
                if not value.endswith('in-addr.arpa'):
                    raise s.ValidationError(_('Invalid PTR domain name.'))

                if settings.DNS_ENABLED:
                    if not Domain.objects.filter(name=value).exists():
                        raise s.ObjectDoesNotExist(value)

        return attrs

    def validate(self, attrs):  # noqa: R701
        """Object-level validation: network/netmask sanity, per-DC quota,
        NIC-tag-dependent VXLAN/MTU rules and DC VLAN/VXLAN restrictions."""
        try:
            network = attrs['network']
        except KeyError:
            network = self.object.network

        try:
            netmask = attrs['netmask']
        except KeyError:
            netmask = self.object.netmask

        try:
            vxlan_id = attrs['vxlan_id']
        except KeyError:
            vxlan_id = self.object.vxlan_id

        try:
            mtu = attrs['mtu']
        except KeyError:
            mtu = self.object.mtu

        try:
            nic_tag = attrs['nic_tag']
        except KeyError:
            nic_tag = self.object.nic_tag

        try:
            # get_ip_network() raises ValueError for an invalid network/netmask pair;
            # reserved networks are mapped onto the same error path
            ip_network = Subnet.get_ip_network(network, netmask)
            if ip_network.is_reserved:
                raise ValueError
        except ValueError:
            self._errors['network'] = self._errors['netmask'] = \
                s.ErrorList([_('Enter a valid IPv4 network and netmask.')])

        if self.request.method == 'POST' and self._dc_bound:
            # Enforce the per-datacenter network quota only on create
            limit = self._dc_bound.settings.VMS_NET_LIMIT

            if limit is not None:
                if Subnet.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                    raise s.ValidationError(_('Maximum number of networks reached.'))

        # retrieve all available nictags and see what is the type of the current nic tag
        nic_tag_type = Node.all_nictags()[nic_tag]

        # if type is overlay then vxlan is mandatory argument
        if nic_tag_type == 'overlay rule':
            if not vxlan_id:
                self._errors['vxlan_id'] = s.ErrorList([_('VXLAN ID is required when an '
                                                          'overlay NIC tag is selected.')])
        else:
            attrs['vxlan_id'] = None

        # validate MTU for overlays and etherstubs, and physical nics
        if nic_tag_type == 'overlay rule':
            # if MTU was not set for the overlay
            if not mtu:
                attrs['mtu'] = 1400
            # BUGFIX: this branch used to be a separate `if mtu > 8900:` which
            # compared None > int (TypeError on Python 3) when mtu was not set;
            # `elif` is equivalent for all values that previously worked
            elif mtu > 8900:
                self._errors['mtu'] = s.ErrorList([
                    s.IntegerField.default_error_messages['max_value'] % {'limit_value': 8900}
                ])

        if nic_tag_type in ('normal', 'aggr') and mtu and mtu < 1500:
            self._errors['mtu'] = s.ErrorList([
                s.IntegerField.default_error_messages['min_value'] % {'limit_value': 1500}
            ])

        if self._dc_bound:
            try:
                vlan_id = attrs['vlan_id']
            except KeyError:
                vlan_id = self.object.vlan_id

            dc_settings = self._dc_bound.settings

            if dc_settings.VMS_NET_VLAN_RESTRICT and vlan_id not in dc_settings.VMS_NET_VLAN_ALLOWED:
                self._errors['vlan_id'] = s.ErrorList([_('VLAN ID is not available in datacenter.')])

            if dc_settings.VMS_NET_VXLAN_RESTRICT and vxlan_id not in dc_settings.VMS_NET_VXLAN_ALLOWED:
                self._errors['vxlan_id'] = s.ErrorList([_('VXLAN ID is not available in datacenter.')])

        return super(NetworkSerializer, self).validate(attrs)

    # noinspection PyMethodMayBeStatic
    def update_errors(self, fields, err_msg):
        """Return a dict mapping each field name in `fields` to the same error message."""
        return {field: s.ErrorList([err_msg]) for field in fields}