class SnapshotSerializer(s.InstanceSerializer):
    """Serializer for vms.models.Snapshot — mostly read-only; only ``note`` is updatable."""
    _model_ = Snapshot
    _update_fields_ = ('note',)
    _default_fields_ = ('hostname', 'name', 'disk_id')

    hostname = s.CharField(source='vm.hostname', read_only=True)
    vm_uuid = s.CharField(source='vm.uuid', read_only=True)
    define = s.CharField(source='define.name', read_only=True)
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=24, min_length=1)
    disk_id = s.IntegerField(source='array_disk_id', max_value=DISK_ID_MAX, min_value=DISK_ID_MIN)
    note = s.SafeCharField(max_length=128, required=False)
    # type is fixed at creation (default=2); read-only through the API
    type = s.IntegerChoiceField(choices=Snapshot.TYPE, default=2, read_only=True)
    created = s.DateTimeField(read_only=True, required=False)
    status = s.IntegerChoiceField(choices=Snapshot.STATUS, read_only=True, required=False)
    size = s.IntegerField(read_only=True)
    id = s.SafeCharField(read_only=True)
class VmMonitoringSerializer(s.InstanceSerializer):
    """Serializer for validating mon_vm_define parameters (VM monitoring settings)."""
    _model_ = Vm
    _blank_fields_ = ('ip',)
    _update_fields_ = ('ip', 'dns', 'port', 'useip', 'proxy', 'templates', 'hostgroups')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    monitored = s.BooleanField(read_only=True)
    ip = s.IPAddressField(source='monitoring_ip', required=False)
    dns = s.RegexField(r'^[A-Za-z0-9\.-]+$', source='monitoring_dns', required=False, min_length=1, max_length=128)
    port = s.IntegerField(source='monitoring_port', required=False, min_value=1, max_value=65535)
    useip = s.BooleanField(source='monitoring_useip', required=False)
    proxy = s.CharField(source='monitoring_proxy', required=False, min_length=1, max_length=128)
    templates = s.ArrayField(source='monitoring_templates', max_items=32, required=False, default=[])
    hostgroups = s.ArrayField(source='monitoring_hostgroups', max_items=16, required=False, default=[],
                              validators=(RegexValidator(regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS),))

    def __init__(self, request, vm, *args, **kwargs):
        super(VmMonitoringSerializer, self).__init__(request, vm, *args, **kwargs)
        self.dc_settings = dc_settings = request.dc.settings
        # Defaults come from the current DC's monitoring settings.
        self.fields['dns'].default = vm.hostname
        self.fields['port'].default = dc_settings.MON_ZABBIX_HOST_VM_PORT
        self.fields['useip'].default = dc_settings.MON_ZABBIX_HOST_VM_USEIP
        self.fields['proxy'].default = dc_settings.MON_ZABBIX_HOST_VM_PROXY

    def validate_useip(self, attrs, source):
        # null value will remove the useip parameter in monitoring_useip property => the default value will be used
        if source in attrs and self.init_data.get('useip', True) is None:
            attrs[source] = None
        return attrs

    # Allow to use only available templates (reuses VmDefineSerializer's validator).
    # NOTE: ``im_func`` is Python 2 only — this codebase targets py2 (see iteritems usage elsewhere).
    validate_templates = VmDefineSerializer.validate_monitoring_templates.im_func

    # Allow to use only available hostgroups (reuses VmDefineSerializer's validator).
    validate_hostgroups = VmDefineSerializer.validate_monitoring_hostgroups.im_func
class SnapshotDefineSerializer(s.InstanceSerializer):
    """Serializer for vms.models.SnapshotDefine (periodic snapshot definitions)."""
    _model_ = SnapshotDefine
    _update_fields_ = ('desc', 'active', 'schedule', 'retention')
    _default_fields_ = ('hostname', 'name', 'disk_id')

    hostname = s.CharField(source='vm.hostname', read_only=True)
    vm_uuid = s.CharField(source='vm.uuid', read_only=True)
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=8, min_length=1)
    disk_id = s.IntegerField(source='array_disk_id', max_value=DISK_ID_MAX, min_value=DISK_ID_MIN)
    desc = s.SafeCharField(max_length=128, required=False)
    active = s.BooleanField(default=True)
    schedule = s.CronField()
    retention = s.IntegerField()  # limits set below
    fsfreeze = s.BooleanField(default=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(SnapshotDefineSerializer, self).__init__(request, instance, *args, **kwargs)

        if not kwargs.get('many', False):
            dc_settings = request.dc.settings

            # Limit maximum number of snapshots - Issue #chili-447
            if dc_settings.VMS_VM_SNAPSHOT_LIMIT_AUTO is None:
                min_count, max_count = RETENTION_MIN, RETENTION_MAX
            else:
                min_count, max_count = 1, int(dc_settings.VMS_VM_SNAPSHOT_LIMIT_AUTO)

            self.fields['retention'].validators.append(validators.MinValueValidator(min_count))
            self.fields['retention'].validators.append(validators.MaxValueValidator(max_count))

            # fsfreeze is only updatable for hardware-virtualized machines.
            if instance.vm.is_hvm():
                self._update_fields_ = list(self._update_fields_)
                self._update_fields_.append('fsfreeze')

            # Bhyve supports a larger disk ID range.
            if instance.vm.is_bhyve():
                self.fields['disk_id'].max_value = DISK_ID_MAX_BHYVE

    def validate(self, attrs):
        # Check total number of existing snapshot definitions - Issue #chili-447
        if self.request.method == 'POST':
            limit = self.request.dc.settings.VMS_VM_SNAPSHOT_DEFINE_LIMIT

            if limit is not None:
                total = self._model_.objects.filter(vm=self.object.vm).count()

                if int(limit) <= total:
                    raise s.ValidationError(_('Maximum number of snapshot definitions reached.'))

        return attrs
class VmStatusSerializer(s.Serializer):
    """Read-only serializer exposing a VM's current status and related runtime info."""
    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    alias = s.CharField(read_only=True)
    status = s.DisplayChoiceField(choices=Vm.STATUS, read_only=True)
    status_change = s.DateTimeField(read_only=True)
    node_status = s.DisplayChoiceField(source='node.status', choices=Node.STATUS_DB, read_only=True)
    tasks = s.CharField(source='tasks', read_only=True)
    uptime = s.IntegerField(source='uptime_actual', read_only=True)
class UpdateSerializer(s.Serializer):
    """Validate system-update parameters: target version plus optional PEM key/certificate."""
    version = s.CharField(required=True, max_length=1024, min_length=2)
    key = s.CharField(required=False, max_length=1048576, validators=(validate_pem_key,))
    cert = s.CharField(required=False, max_length=1048576, validators=(validate_pem_cert,))

    def __init__(self, request, *args, **kwargs):
        self.request = request
        super(UpdateSerializer, self).__init__(*args, **kwargs)
class TincHostSerializer(serializers.ModelSerializer):
    """Read-only serializer for TincHost objects."""
    name = serializers.CharField(read_only=True)
    pubkey = serializers.CharField(required=True)
    addresses = TincAddressSerializer()

    class Meta:
        model = TincHost
        fields = ('name', 'pubkey', 'addresses')

    def to_native(self, obj):
        """Keep the API clean: a tinc host without a pubkey serializes as None instead of {}."""
        if obj.pubkey is not None:
            return super(TincHostSerializer, self).to_native(obj)

        return None
class RecordSerializer(s.InstanceSerializer):
    """Serializer for pdns.models.Record (DNS records)."""
    _model_ = Record
    _update_fields_ = ('name', 'type', 'content', 'ttl', 'prio', 'disabled')
    _null_fields_ = frozenset({'content', 'ttl', 'prio'})

    id = s.Field()
    domain = s.Field()
    name = s.CharField(max_length=253)  # Validated via pdns.validators
    type = s.ChoiceField(choices=Record.TYPE)
    content = s.CharField(max_length=65535, required=False)
    ttl = s.IntegerField(default=Record.TTL, required=False, min_value=0, max_value=2147483647)
    prio = s.IntegerField(default=Record.PRIO, required=False, min_value=0, max_value=65535)
    disabled = s.BooleanField(default=False)
    changed = s.DateTimeField(read_only=True, required=False)

    # noinspection PyMethodMayBeStatic
    def validate_name(self, attrs, source):
        if source not in attrs:
            return attrs

        value = attrs[source]

        if value == '@':  # "@" is a shortcut for the domain origin
            value = self.object.domain.name

        # The record name must be always lower-cased (DB requirement)
        attrs[source] = value.lower()

        return attrs

    def validate(self, attrs):
        rec = self.object

        # Run the type-specific record validator against the new (or current) values.
        try:
            run_record_validator(rec.domain,
                                 attrs.get('type', rec.type),
                                 attrs.get('name', rec.name),
                                 attrs.get('content', rec.content))
        except RecordValidationError as exc:
            self._errors = exc.message_dict

        return attrs

    def detail_dict(self, **kwargs):
        """Always include id and name"""
        dd = super(RecordSerializer, self).detail_dict(**kwargs)
        dd.update(id=self.object.id, name=self.object.name)

        return dd
class ResourceSerializer(serializers.ModelSerializer):
    """Serializer for Resource objects; ``avail`` and ``unit`` are read-only."""
    avail = serializers.Field()
    unit = serializers.CharField(read_only=True)

    class Meta:
        model = Resource
        fields = ['name', 'max_req', 'dflt_req', 'unit', 'avail']
def create_dummy_serializer(serializer_cls, skip_fields=(), required_fields=()):
    """Convert an existing serializer class into a serializer usable as a serializer field.

    The resulting serializer is missing the original validators and the fields'
    original ``required`` attribute.

    @type serializer_cls: api.serializers.Serializer
    """
    class Serializer(s.Serializer):
        pass

    # noinspection PyUnresolvedReferences
    for fname, fld in iteritems(serializer_cls.base_fields):
        if fname in skip_fields or fld.read_only:
            continue

        if isinstance(fld, s.RelatedField):
            dummy_field = s.CharField()
        else:
            dummy_field = deepcopy(fld)  # Do not touch the original field

        # required is forced on/off depending solely on required_fields.
        dummy_field.required = fname in required_fields
        # noinspection PyUnresolvedReferences
        Serializer.base_fields[fname] = dummy_field

    return Serializer
class NodeGeolocationSerializer(serializers.ModelSerializer):
    """Serializer for NodeGeolocation; ``coordinates`` maps to the model's geolocation field."""
    coordinates = serializers.CharField(source='geolocation')
    lat = serializers.Field()
    lon = serializers.Field()

    class Meta:
        model = NodeGeolocation
        fields = ('address', 'coordinates', 'lat', 'lon')
class ActionSerializer(s.Serializer):
    """Serializer for monitoring (Zabbix) action definitions."""
    name = s.SafeCharField(max_length=200)  # The name in Zabbix will be prefixed with DC name
    enabled = s.BooleanField(default=True)
    # As we implement dynamic hostgroup creation everywhere, we will not validate whether any hostgroup exists.
    # Also we don't have to have any hostgroup defined while we create the Action as it is not a required field.
    hostgroups = s.ArrayField(max_items=1024, default=[],
                              validators=(RegexValidator(regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS),))
    usergroups = s.ArrayField(max_items=1024)
    message_subject = s.CharField(max_length=255, default=DEFAULT_ACTION_MESSAGE_SUBJECT)
    message_text = s.CharField(default=DEFAULT_ACTION_MESSAGE)
    recovery_message_enabled = s.BooleanField(default=False)
    recovery_message_subject = s.CharField(max_length=255, default=DEFAULT_ACTION_MESSAGE_SUBJECT)
    recovery_message_text = s.CharField(default=DEFAULT_ACTION_MESSAGE)

    def __init__(self, request, *args, **kwargs):
        super(ActionSerializer, self).__init__(*args, **kwargs)
        self.request = request
        self.dc_settings = request.dc.settings

        # usergroups must be provided when creating a new action; optional otherwise.
        if request.method == 'POST':
            self.fields['usergroups'].required = True
        else:
            self.fields['usergroups'].default = []
            self.fields['usergroups'].required = False

    def validate_usergroups(self, attrs, source):
        # User groups are created in the monitoring system according to groups in the DB. We should validate this
        # array against groups available in the current DC.
        try:
            groups_requested = set(attrs[source])
        except KeyError:
            pass
        else:
            if not groups_requested:  # The Zabbix API does not allow to set empty operations
                raise s.ValidationError(s.WritableField.default_error_messages['invalid'])

            groups_available = set(Role.objects.filter(dc=self.request.dc, name__in=groups_requested)
                                       .values_list('name', flat=True))
            groups_unavailable = groups_requested - groups_available

            if groups_unavailable:
                raise s.ValidationError([_('User group with name=%s does not exist.') % group
                                         for group in groups_unavailable])

        return attrs
class SSLCertificateSerializer(s.Serializer):
    """Validate a PEM-encoded SSL certificate bundle (certificate + private key).

    NOTE: the previous docstring ("Validate update urls and login credentials.") was
    copy-pasted from UpdateSerializer and described the wrong thing.
    """
    # The submitted value must contain both a valid PEM certificate and a valid PEM private key.
    cert = s.CharField(max_length=2097152, validators=(validate_pem_cert, validate_pem_key))

    def __init__(self, request, *args, **kwargs):
        self.request = request
        super(SSLCertificateSerializer, self).__init__(*args, **kwargs)
class VmStatusActionIsoSerializer(s.Serializer):
    """Validate cdimage parameters for VM status actions and resolve them to Iso objects."""
    iso = None
    iso2 = None
    cdimage = s.CharField(required=False)
    cdimage2 = s.CharField(required=False)
    cdimage_once = s.BooleanField(default=True)

    def __init__(self, request, vm, *args, **kwargs):
        self.request = request
        self.vm = vm
        super(VmStatusActionIsoSerializer, self).__init__(*args, **kwargs)

    def validate_iso(self, value):
        """Translate an ISO image name into an Iso object available for this VM's ostype."""
        try:
            return get_iso_images(self.request, self.vm.ostype).get(name=value)
        except Iso.DoesNotExist:
            raise s.ValidationError(s.ChoiceField.default_error_messages['invalid_choice'] % {'value': value})

    def validate_cdimage(self, attrs, source):
        value = attrs.get(source)

        if value:
            self.iso = self.validate_iso(value)

        return attrs

    def validate_cdimage2(self, attrs, source):
        value = attrs.get(source)

        if value:
            self.iso2 = self.validate_iso(value)

        return attrs
class NetworkNodeMonHistorySerializer(MonHistorySerializer):
    """Used by NodeHistoryView to validate the nic_id value."""
    nic = s.CharField(required=True)

    def validate(self, attrs):
        """Accept only NICs actually used by the compute node.

        The previous implementation used ``assert nic``, which is a no-op under
        ``python -O`` and would raise AssertionError (HTTP 500) instead of a proper
        validation error; an explicit check raising ValidationError is used instead.
        """
        nic = attrs.get('nic')

        if nic and nic in self.obj.used_nics:
            self.item_id = nic
        else:
            raise s.ValidationError(_('NIC not defined on compute node.'))

        return attrs
class StorageNodeMonHistorySerializer(MonHistorySerializer):
    """Used by NodeHistoryView to validate the zpool value."""
    zpool = s.CharField(required=True)

    def validate(self, attrs):
        """Accept only zpools actually present on the compute node.

        The previous implementation used ``assert zpool``, which is a no-op under
        ``python -O`` and would raise AssertionError (HTTP 500) instead of a proper
        validation error; an explicit check raising ValidationError is used instead.
        """
        zpool = attrs.get('zpool')

        if zpool and zpool in self.obj.zpools:
            self.item_id = zpool
        else:
            raise s.ValidationError(_('Zpool not defined on compute node.'))

        return attrs
class PasswordSerializer(serializers.Serializer):
    """Validate and apply a node password change (or explicitly disable the password)."""
    disable_password = serializers.BooleanField(default=True)
    password = serializers.CharField(default='', required=False)

    def __init__(self, node, *args, **kwargs):
        super(PasswordSerializer, self).__init__(*args, **kwargs)
        self.node = node

    def validate(self, attrs):
        # A password must be supplied unless it has been explicitly disabled.
        if not (attrs['disable_password'] or attrs['password']):
            raise serializers.ValidationError('You must provide a password or disable it.')

        return attrs

    def process_post(self):
        assert self.is_valid()
        self.data['disabled'] = self.data['disable_password']

        return process_data(self.node, self.data)
class VmSerializer(VmBaseSerializer):
    """Read-only VM details."""
    hostname = s.Field()
    uuid = s.CharField(read_only=True)
    alias = s.Field()
    node = s.SlugRelatedField(slug_field='hostname', read_only=True, required=False)
    owner = s.SlugRelatedField(slug_field='username', read_only=True)
    status = s.DisplayChoiceField(choices=Vm.STATUS, read_only=True)
    node_status = s.DisplayChoiceField(source='node.status', choices=Node.STATUS_DB, read_only=True)
    vcpus = s.IntegerField(read_only=True)
    ram = s.IntegerField(read_only=True)
    disk = s.IntegerField(read_only=True)
    ips = s.ArrayField(read_only=True)
    uptime = s.IntegerField(source='uptime_actual', read_only=True)
    locked = s.BooleanField(read_only=True)
class BackupSerializer(_HideNodeSerializer):
    """Serializer for vms.models.Backup — mostly read-only; only ``note`` is updatable."""
    _model_ = Backup
    _update_fields_ = ('note',)
    _default_fields_ = ('hostname', 'vm', 'dc', 'name', 'disk_id')

    hostname = s.CharField(source='vm_hostname', read_only=True)
    vm_uuid = s.CharField(source='vm.uuid', read_only=True)
    vm = s.CharField(source='vm', required=False, read_only=True)
    dc = s.CharField(source='dc', read_only=True)
    define = s.CharField(source='define.name', read_only=True)
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=24, min_length=1)
    disk_id = s.IntegerField(source='array_disk_id', max_value=DISK_ID_MAX, min_value=DISK_ID_MIN)
    type = s.IntegerChoiceField(choices=Backup.TYPE, read_only=True)
    node = s.CharField(source='node.hostname', read_only=True)
    zpool = s.CharField(source='zpool.zpool', read_only=True)
    created = s.DateTimeField(read_only=True, required=False)
    status = s.IntegerChoiceField(choices=Backup.STATUS, read_only=True, required=False)
    size = s.IntegerField(read_only=True)
    time = s.IntegerField(read_only=True)
    file_path = s.CharField(read_only=True)
    note = s.SafeCharField(max_length=128, required=False)

    def __init__(self, request, instance, node_view=False, *args, **kwargs):
        super(BackupSerializer, self).__init__(request, instance, *args, **kwargs)

        # The dc field is only exposed in the node view.
        if not node_view:
            del self.fields['dc']
class BackupDefineSerializer(_HideNodeSerializer):
    """Serializer for vms.models.BackupDefine (periodic backup definitions)."""
    _model_ = BackupDefine
    _update_fields_ = ('type', 'desc', 'node', 'zpool', 'bwlimit', 'active', 'schedule', 'retention',
                       'compression')
    _default_fields_ = ('hostname', 'name', 'disk_id')

    hostname = s.CharField(source='vm.hostname', read_only=True)
    vm_uuid = s.CharField(source='vm.uuid', read_only=True)
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=8, min_length=1)
    disk_id = s.IntegerField(source='array_disk_id', max_value=DISK_ID_MAX, min_value=DISK_ID_MIN)
    type = s.IntegerChoiceField(choices=BackupDefine.TYPE, default=BackupDefine.DATASET)
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects)  # queryset set below
    zpool = s.CharField(max_length=64)  # validated below
    desc = s.SafeCharField(max_length=128, required=False)
    bwlimit = s.IntegerField(required=False, min_value=0, max_value=2147483647)
    active = s.BooleanField(default=True)
    schedule = s.CronField()
    retention = s.IntegerField()  # limits set below
    compression = s.IntegerChoiceField(choices=BackupDefine.COMPRESSION)
    fsfreeze = s.BooleanField(default=False)

    def __init__(self, request, instance, *args, **kwargs):
        vm_template = kwargs.pop('vm_template', False)
        self._update_fields_ = list(self._update_fields_)
        super(BackupDefineSerializer, self).__init__(request, instance, *args, **kwargs)

        if not kwargs.get('many', False):
            dc_settings = request.dc.settings
            backup_nodes = get_nodes(request, is_backup=True)
            self.fields['node'].queryset = backup_nodes
            self.fields['zpool'].default = dc_settings.VMS_STORAGE_DEFAULT
            self.fields['compression'].default = dc_settings.VMS_VM_BACKUP_COMPRESSION_DEFAULT

            # Set first backup node and backup node storage available in DC
            # (used only when called by VmDefineBackup.create_from_template())
            if vm_template:
                try:
                    self.fields['node'].default = first_node = backup_nodes[0]
                except IndexError:
                    pass
                else:
                    first_node_zpools = get_zpools(request).filter(node=first_node).values_list('zpool', flat=True)

                    if first_node_zpools and dc_settings.VMS_STORAGE_DEFAULT not in first_node_zpools:
                        self.fields['zpool'].default = first_node_zpools[0]

            # The backup type cannot be changed after creation.
            if request.method != 'POST':
                self.fields['type'].read_only = True

            # Limit maximum number of backups - Issue #chili-447
            if dc_settings.VMS_VM_BACKUP_LIMIT is None:
                min_count, max_count = RETENTION_MIN, RETENTION_MAX
            else:
                min_count, max_count = 1, int(dc_settings.VMS_VM_BACKUP_LIMIT)

            self.fields['retention'].validators.append(validators.MinValueValidator(min_count))
            self.fields['retention'].validators.append(validators.MaxValueValidator(max_count))

            # fsfreeze is only updatable for KVM machines.
            if instance.vm.is_kvm():
                self._update_fields_.append('fsfreeze')

    def validate(self, attrs):
        try:
            zpool = attrs['zpool']
        except KeyError:
            zpool = self.object.zpool

        try:
            node = attrs['node']
        except KeyError:
            node = self.object.node

        # Replace the zpool name with the NodeStorage object for the selected node.
        try:
            attrs['zpool'] = get_zpools(self.request).get(node=node, zpool=zpool)
        except NodeStorage.DoesNotExist:
            self._errors['zpool'] = s.ErrorList([_('Zpool does not exist on node.')])

        # Check total number of existing backup definitions - Issue #chili-447
        if self.request.method == 'POST':
            limit = self.request.dc.settings.VMS_VM_BACKUP_DEFINE_LIMIT

            if limit is not None:
                total = self._model_.objects.filter(vm=self.object.vm).count()

                if int(limit) <= total:
                    raise s.ValidationError(_('Maximum number of backup definitions reached.'))

        return attrs
class TaskLogFilterSerializer(s.Serializer):
    """Validate task-log filter parameters and build the corresponding queryset filters."""
    _content_type = None
    _object_pks = None
    status = s.ChoiceField(label=_('Status'), required=False, choices=TASK_STATES)
    object_type = s.ChoiceField(source='content_type', label=_('Object type'), required=False,
                                choices=TASK_OBJECT_TYPES)
    object_name = s.CharField(label=_('Object name'), max_length=2048, required=False)
    show_running = s.BooleanField(label=_('Show only running tasks'), required=False, default=False)
    hide_auto = s.BooleanField(label=_('Hide automatic tasks'), required=False, default=False)
    date_from = s.DateField(label=_('Since'), required=False)
    date_to = s.DateField(label=_('Until'), required=False)

    def validate(self, attrs):
        object_type = attrs.get('content_type', None)
        object_name = attrs.get('object_name', None)

        # object_name depends on object_type
        if object_name:
            if not object_type:
                self._errors['object_type'] = s.ErrorList([_('object_type attribute is required when '
                                                             'filtering by object_name.')])
                return attrs

            self._content_type = content_type = ContentType.objects.get(model=object_type)
            model_class = content_type.model_class()
            lookup_kwargs = model_class.get_log_name_lookup_kwargs(object_name)
            filter_kwargs = {key + '__icontains': val for key, val in lookup_kwargs.items()}
            self._object_pks = list(model_class.objects.filter(**filter_kwargs).values_list('pk', flat=True))

        return attrs

    def get_filters(self, pending_tasks=()):
        """Return a combined Q filter, None (no filtering) or False (empty result set)."""
        if self._object_pks is not None and not self._object_pks:
            # Means that we want to return empty filter results
            return False

        tz = timezone.get_current_timezone()
        data = self.object
        query = []

        date_from = data.get('date_from')
        if date_from:
            # "Since" starts at the beginning of the selected day.
            date_from = datetime.combine(date_from, datetime.min.time())
            query.append(Q(time__gte=date_from.replace(tzinfo=utc).astimezone(tz)))

        date_to = data.get('date_to')
        if date_to:
            # BUGFIX: "Until" must include the whole selected day, so combine with the end of
            # the day. The previous code used datetime.min.time(), which made time__lte point
            # at midnight and excluded virtually all entries of the selected day.
            date_to = datetime.combine(date_to, datetime.max.time())
            query.append(Q(time__lte=date_to.replace(tzinfo=utc).astimezone(tz)))

        if self._object_pks:
            query.append(Q(object_pk__in=self._object_pks))

        status = data.get('status')
        if status:
            query.append(Q(status=status))

        if data.get('show_running'):
            query.append(Q(task__in=pending_tasks))

        object_type = data.get('content_type')
        if object_type:
            if self._content_type:
                content_type = self._content_type
            else:
                content_type = ContentType.objects.get(model=object_type)

            query.append(Q(content_type=content_type))

        if data.get('hide_auto'):
            query.append(~Q(task_type__in=get_task_types(tt=(TT_AUTO,))))

        if query:
            return reduce(and_, query)
        else:
            return None
class UserSerializer(ApiKeysSerializer):
    """Serializer for gui.models.User with permission-aware field validation."""
    _model_ = User
    _update_fields_ = ('email', 'first_name', 'last_name', 'is_super_admin', 'is_active', 'api_access',
                       'api_key', 'callback_key', 'groups', 'dc_bound', 'password')
    _default_fields_ = ('username', 'is_super_admin', 'is_active', 'api_access', 'password')

    username = s.RegexField(r'^[A-Za-z0-9@.+_-]*$', max_length=254)
    current_dc = s.SlugRelatedField(source='default_dc', slug_field='name', read_only=True, required=False)
    email = s.EmailField(max_length=254)
    first_name = s.SafeCharField(max_length=30)
    last_name = s.SafeCharField(max_length=30)
    is_super_admin = s.BooleanField(source='is_staff')
    is_active = s.BooleanField()
    api_access = s.BooleanField()
    groups = s.ArrayField(required=False, source='roles_api')
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    created = s.DateTimeField(source='date_joined', read_only=True)
    password = s.CharField()

    old_email = None  # variable for value storage on email change
    is_staff_changed = False
    old_roles = ()

    def __init__(self, request, user, *args, **kwargs):
        super(UserSerializer, self).__init__(request, user, *args, **kwargs)

        if not kwargs.get('many', False):
            self._dc_bound = user.dc_bound

    def _normalize(self, attr, value):
        # dc_bound is resolved through the cached Dc object (or None), not the raw attr value.
        if attr == 'dc_bound':
            return self._dc_bound

        # noinspection PyProtectedMember
        return super(UserSerializer, self)._normalize(attr, value)

    # noinspection PyProtectedMember
    @atomic
    def save(self, **kwargs):
        user = self.object
        new_flag = (not user.pk or getattr(user, 'new', False))
        user.save()

        if user._roles_to_save is not None:
            self.old_roles = set(user.roles.all())
            user.roles = user._roles_to_save

        # A newly created user via the API is automatically marked as verified.
        # The creator has to provide a correct email, or set the email as not verified in the
        # user profile (since email is required)! An email change by the user will trigger an
        # email with a verification code so the profile can be completed. If the admin doesn't
        # set a phone, the user is forced to set it, and when the phone is changed an SMS
        # verification is sent.
        if new_flag:
            user.userprofile.email_verified = True
            user.userprofile.phone_verified = True
            user.userprofile.save()

        # Changing a user email makes the email not verified
        # (unless request.user is part of the staff or registration is disabled).
        if self.old_email and not self.request.user.is_staff and settings.REGISTRATION_ENABLED:
            user.userprofile.email_verified = False
            user.userprofile.email_token = user.userprofile.generate_token(6)
            user.userprofile.save()
            sendmail(user, 'accounts/user/base/profile_verify_subject.txt',
                     'accounts/user/base/profile_verify_email.txt', extra_context={
                         'email_token': user.userprofile.email_token,
                     })

    def validate_username(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            # The main admin account cannot be renamed; reserved usernames are rejected.
            if self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify
            elif value in INVALID_USERNAMES:
                raise s.ValidationError(s.WritableField.default_error_messages['invalid'])

        return attrs

    def validate_email(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            qs = User.objects

            if self.object.pk:
                if self.object.email == value:
                    return attrs  # unchanged email - nothing to check
                else:
                    self.old_email = self.object.email
                    qs = qs.exclude(pk=self.object.pk)

            # Check if someone does not use this email (or username) already
            if qs.filter(Q(email__iexact=value) | Q(username__iexact=value)).exists():
                raise s.ValidationError(_('This email is already in use. Please supply a different email.'))

        return attrs

    # noinspection PyMethodMayBeStatic
    def validate_groups(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            groups = []

            for grp in value:
                try:
                    group = Role.objects.get(name=grp)
                except Role.DoesNotExist:
                    raise s.ObjectDoesNotExist(grp)
                else:
                    if self.request.user.is_staff:
                        groups.append(group)
                    else:
                        # Non-staff users may only assign groups bound to their own DC.
                        if group.dc_bound and self._dc_bound and group.dc_bound == self._dc_bound:
                            groups.append(group)
                        else:
                            raise s.ValidationError(_('You don\'t have permission to use DC-unbound groups.'))

            attrs[source] = groups

        return attrs

    def validate_dc_bound(self, attrs, source):
        try:
            value = bool(attrs[source])
        except KeyError:
            pass
        else:
            if value != self.object.dc_bound_bool:
                # Only staff may change the DC-bound flag.
                if not self.request.user.is_staff:
                    raise s.NoPermissionToModify

                if value:
                    data = self.init_data or {}
                    self._dc_bound = get_dc(self.request, data.get('dc', self.request.dc.name))
                else:
                    self._dc_bound = None

        return attrs

    def validate_is_super_admin(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if self.object.is_staff != value:
                # The main admin account cannot lose SuperAdmin status.
                if self.object.id == settings.ADMIN_USER:
                    raise s.NoPermissionToModify

                if self.request.user.is_staff:
                    self.is_staff_changed = self.object.is_staff != value
                else:
                    raise s.NoPermissionToModify

        return attrs

    def validate_is_active(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            # The main admin account cannot be deactivated.
            if self.object.is_active != value and self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify

        return attrs

    def validate_api_access(self, attrs, source):
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            # The main admin account's API access cannot be changed.
            if self.object.api_access != value and self.object.id == settings.ADMIN_USER:
                raise s.NoPermissionToModify

        return attrs

    def validate(self, attrs):
        # User is or will be bound to this DC
        dc = self._dc_bound

        if attrs.get('dc_bound_bool', self.object.dc_bound_bool) and attrs.get('is_staff', self.object.is_staff):
            self._errors['dc_bound'] = _('A SuperAdmin user cannot be DC-bound.')

        if dc:
            # User is or will be member of these groups
            try:
                groups = attrs['roles_api']
            except KeyError:
                if self.object.pk:
                    groups = self.object.roles.all()
                else:
                    groups = ()

            # A DC-bound user cannot be a member of a group that is assigned to another DC other than user.dc_bound
            if Dc.objects.filter(roles__in=groups).exclude(id=dc.id).exists():
                self._errors['dc_bound'] = s.ErrorList([_("User's group(s) are attached into another datacenter(s).")])

        return attrs

    def _setattr(self, instance, source, value):
        """Update user password if parameter was passed from es"""
        if source == 'password':
            self.object.set_password(value)
        else:
            # noinspection PyProtectedMember
            super(UserSerializer, self)._setattr(instance, source, value)

    def detail_dict(self, **kwargs):
        dd = super(UserSerializer, self).detail_dict(**kwargs)

        # Remove sensitive data from detail dict
        if 'password' in dd:
            dd['password'] = '******'
        if 'api_key' in dd:
            dd['api_key'] = '***'
        if 'callback_key' in dd:
            dd['callback_key'] = '***'

        return dd

    def to_native(self, obj):
        """Updated so we don't display password hash"""
        ret = super(UserSerializer, self).to_native(obj)

        if 'password' in ret:
            del ret['password']
        if 'api_key' in ret:
            ret['api_key'] = '***'
        if 'callback_key' in ret:
            ret['callback_key'] = '***'

        return ret
class NodeDefineSerializer(s.InstanceSerializer):
    """Serializer for vms.models.Node definitions (status, capabilities, coefficients, monitoring)."""
    error_negative_resources = s.ErrorList([_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'is_compute', 'is_backup', 'cpu_coef', 'ram_coef',
                       'monitoring_hostgroups', 'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.CharField(read_only=True)
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status', choices=Node.STATUS_DB, read_only=True)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0, max_value=1, max_digits=4, decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(max_items=16, default=[])
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args, **kwargs)
        self.clear_cache = False
        self.status_changed = False
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))

        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(_('Cannot change status. Please add a valid license first.'))

            if node.is_unreachable() or node.is_offline():  # Manual switch from unreachable and offline state
                if settings.DEBUG:
                    logger.warning('DEBUG mode on => skipping status checking of node %s', self.object)
                elif not node_ping(self.object, all_workers=False):  # requires that node is really online
                    raise s.ValidationError(_('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            self.status_changed = value  # stores the new status value (truthy for defined statuses)

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))

            self.clear_cache = True

        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(_('Found existing VM backup definitions.'))

            self.clear_cache = True

        # Check existing backups when removing node
        if self.request.method == 'DELETE':
            if self.object.backup_set.exists():
                raise s.ValidationError(_('Found existing VM backups.'))

            self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True

        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True

        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        return not (self.object.cpu_coef == self._cpu_coef and self.object.ram_coef == self._ram_coef)
class VmMigrateSerializer(s.Serializer):
    """Validate a VM migration request (target node and/or target zpools) and
    prepare the "ghost VM" placeholder used to reserve resources in the DB."""
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, required=False)
    root_zpool = s.CharField(max_length=64, required=False)
    disk_zpools = DiskPoolDictField(required=False)

    def __init__(self, request, vm, *args, **kwargs):
        self.img_required = None
        self.request = request
        self.vm = vm
        super(VmMigrateSerializer, self).__init__(*args, **kwargs)
        # Only online compute nodes visible to this request are valid targets
        self.fields['node'].queryset = get_nodes(request, is_compute=True)
        self._disks = vm.json_active_get_disks()

        if vm.is_kvm():
            # Per-disk zpool overrides make sense only for KVM (multiple disks)
            self.fields['disk_zpools'].max_items = len(self._disks)
        else:
            del self.fields['disk_zpools']

    def validate_disk_zpools(self, attrs, source):
        # disk_zpools maps 1-based disk_id -> zpool name (see DiskPoolDictField)
        disk_zpools = attrs.get(source, None)

        if disk_zpools:
            if max(disk_zpools.keys()) > len(self._disks):
                raise s.ValidationError(_('Invalid disk_id.'))

        return attrs

    def validate_node(self, attrs, source):
        """Basic node validation"""
        node = attrs.get(source, None)

        if not node:
            # Treat an empty value as "no node change"
            attrs.pop(source, None)
            return attrs

        vm = self.vm

        if node == vm.node:
            raise s.ValidationError(_('Target node is the same as current node.'))

        if node.status != Node.ONLINE:
            raise s.ValidationError(_('Target node is not in online state.'))

        # Check nic tags
        try:
            validate_nic_tags(vm, new_node=node)
        except s.ValidationError:
            raise s.ValidationError(_('Some networks are not available on target node.'))

        return attrs

    def validate(self, attrs):
        vm = self.vm
        node = attrs.get('node', vm.node)
        changing_node = attrs.get('node', vm.node) != vm.node
        # Ghost VM is a copy of a VM used to take up place in DB.
        # When node is changing we have to have all disks in a ghost VM.
        # When changing only disk pools, only the changed disks have to be in a ghost VM.
        ghost_vm = SlaveVm(_master_vm=vm)
        ghost_vm.reserve_resources = changing_node
        ghost_vm.set_migration_hostname()
        ghost_vm.node = node
        ghost_vm_define = SlaveVmDefine(ghost_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        # Every pool must be validated when changing node
        try:
            root_zpool = ghost_vm_define.save_root_zpool(root_zpool, save_same_zpool=changing_node)
        except APIValidationError as exc:
            # NOTE: errors are attached to the "node" field and validation stops here
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate disk_zpools (we can do this after we know the new node)
        if ghost_vm.vm.is_kvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = ghost_vm_define.save_disk_zpools(disk_zpools, save_same_zpool=changing_node)
            except APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return attrs
        else:
            disk_zpools = {}

        # Nothing changed, he?
        if not changing_node and not (root_zpool or disk_zpools):
            raise s.ValidationError(_('Nothing to do.'))

        # Validate dc_node resources
        try:
            ghost_vm_define.validate_node_resources(ignore_cpu_ram=not changing_node)
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate storage resources
        try:
            ghost_vm_define.validate_storage_resources()
        except APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return attrs

        # Validate images
        self.img_required = ghost_vm_define.check_required_images()

        # Save params
        # noinspection PyAttributeOutsideInit
        self._root_zpool = root_zpool
        # noinspection PyAttributeOutsideInit
        self._disk_zpools = disk_zpools
        # noinspection PyAttributeOutsideInit
        self.ghost_vm_define = ghost_vm_define
        # noinspection PyAttributeOutsideInit
        self.changing_node = changing_node

        return attrs

    def save_ghost_vm(self):
        # Persist the placeholder VM that reserves resources during migration
        self.ghost_vm_define.save()
        return self.ghost_vm_define.slave_vm

    def node_image_import(self):
        # Import a missing image onto the target node's storage, if required
        if self.img_required:
            ns, img = self.img_required
            return NodeImageView.import_for_vm(self.request, ns, img, self.vm)
        return None

    @property
    def esmigrate_cmd(self):
        """Create esmigrate command"""
        vm = self.vm
        get_json = 'vmadm get %s 2>/dev/null' % vm.uuid
        params = []

        if self.changing_node:
            node = self.object['node']
            params.append('-H %s' % node.address)
            # After migration the VM json must be fetched from the target node over SSH
            ssh = 'ssh -o ConnectTimeout=10 -o BatchMode=yes -o StrictHostKeyChecking=no ' \
                  '-o GSSAPIKeyExchange=no -o GSSAPIAuthentication=no -o LogLevel=QUIET -l root'
            get_json = '%s %s "%s"' % (ssh, node.address, get_json)

        if self._root_zpool:
            params.append('-p %s' % self._root_zpool)

        if self._disk_zpools:
            for n, zpool in self._disk_zpools.items():
                # Convert 1-based disk_id into the 0-based flag esmigrate expects
                # (presumably "-0 <zpool>", "-1 <zpool>", ... - confirm against esmigrate CLI)
                n -= 1
                params.append('-%s %s' % (n, zpool))

        return 'esmigrate migrate %s %s >&2; ' % (vm.uuid, ' '.join(params)) + get_json

    def detail_dict(self, **kwargs):
        # Summary of the requested migration for task log purposes
        dd = {}

        if self.changing_node:
            dd['node'] = self.object['node'].hostname
        if self._root_zpool:
            dd['root_zpool'] = self._root_zpool
        if self._disk_zpools:
            dd['disk_zpools'] = self._disk_zpools

        return dd
class DcSettingsSerializer(s.InstanceSerializer):
    """
    vms.models.Dc.settings

    Serializes and validates the per-datacenter settings dictionary.
    For a non-default DC, settings declared only by a subclass ("global
    settings") are displayed read-only with values taken from the default DC.
    """
    _global_settings = None
    _model_ = Dc
    modules = settings.MODULES  # Used in gui forms
    third_party_modules = []  # Class level storage, updated only with the decorator function
    third_party_settings = []  # Class level storage, updated only with the decorator function
    # List of settings which cannot be changed when set to False in (local_)settings.py (booleans only)
    _override_disabled_ = settings.MODULES
    _blank_fields_ = frozenset({
        'SITE_LOGO',
        'SITE_ICON',
        'SHADOW_EMAIL',
        'SUPPORT_PHONE',
        'VMS_DISK_IMAGE_DEFAULT',
        'VMS_DISK_IMAGE_ZONE_DEFAULT',
        'VMS_DISK_IMAGE_LX_ZONE_DEFAULT',
        'VMS_NET_DEFAULT',
        'VMS_STORAGE_DEFAULT',
        'MON_ZABBIX_HTTP_USERNAME',
        'MON_ZABBIX_HTTP_PASSWORD',
        'MON_ZABBIX_HOST_VM_PROXY',
        'DNS_SOA_DEFAULT',
        'EMAIL_HOST_USER',
        'EMAIL_HOST_PASSWORD',
        'SMS_FROM_NUMBER',
        'SMS_SERVICE_USERNAME',
        'SMS_SERVICE_PASSWORD',
    })
    _null_fields_ = frozenset({
        'VMS_VM_DEFINE_LIMIT',
        'VMS_VM_SNAPSHOT_DEFINE_LIMIT',
        'VMS_VM_SNAPSHOT_LIMIT_AUTO',
        'VMS_VM_SNAPSHOT_LIMIT_MANUAL',
        'VMS_VM_SNAPSHOT_LIMIT_MANUAL_DEFAULT',
        'VMS_VM_SNAPSHOT_SIZE_LIMIT',
        'VMS_VM_SNAPSHOT_SIZE_LIMIT_DEFAULT',
        'VMS_VM_SNAPSHOT_DC_SIZE_LIMIT',
        'VMS_VM_BACKUP_DEFINE_LIMIT',
        'VMS_VM_BACKUP_LIMIT',
        'VMS_VM_BACKUP_DC_SIZE_LIMIT',
        'VMS_NET_LIMIT',
        'VMS_IMAGE_VM',
        'VMS_IMAGE_LIMIT',
        'VMS_ISO_LIMIT'
    })

    dc = s.CharField(label=_('Datacenter'), read_only=True)

    # Modules
    VMS_VM_SNAPSHOT_ENABLED = s.BooleanField(label=_('Snapshots'))
    VMS_VM_BACKUP_ENABLED = s.BooleanField(label=_('Backups'))
    MON_ZABBIX_ENABLED = s.BooleanField(label=_('Monitoring'))
    DNS_ENABLED = s.BooleanField(label=_('DNS'))
    SUPPORT_ENABLED = s.BooleanField(label=_('Support'))
    REGISTRATION_ENABLED = s.BooleanField(label=_('Registration'))
    FAQ_ENABLED = s.BooleanField(label=_('FAQ'))  # Not part of MODULES (can be overridden even if disabled in settings)

    # Advanced settings
    VMS_VM_DOMAIN_DEFAULT = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._/-]*$', label='VMS_VM_DOMAIN_DEFAULT',
                                         max_length=255, min_length=3,
                                         help_text=_('Default domain part of the hostname of a newly '
                                                     'created virtual server.'))
    COMPANY_NAME = s.CharField(label='COMPANY_NAME', max_length=255,
                               help_text=_('Name of the company using this virtual datacenter.'))
    SITE_NAME = s.CharField(label='SITE_NAME', max_length=255,
                            help_text=_('Name of this site used mostly in email and text message templates.'))
    SITE_LINK = s.CharField(label='SITE_LINK', max_length=255,
                            help_text=_('Link to this site used mostly in email and text message templates.'))
    SITE_SIGNATURE = s.CharField(label='SITE_SIGNATURE', max_length=255,
                                 help_text=_('Signature attached to outgoing emails related '
                                             'to this virtual datacenter.'))
    SITE_LOGO = s.URLField(label='SITE_LOGO', max_length=2048, required=False,
                           help_text=_('URL pointing to an image, which will be displayed as a logo on the main page. '
                                       'If empty the default Danube Cloud logo will be used.'))
    SITE_ICON = s.URLField(label='SITE_ICON', max_length=2048, required=False,
                           help_text=_('URL pointing to an image, which will be displayed as an icon in the navigation '
                                       'bar. If empty the default Danube Cloud icon will be used.'))
    SUPPORT_EMAIL = s.EmailField(label='SUPPORT_EMAIL', max_length=255,
                                 help_text=_('Destination email address used for all support tickets '
                                             'related to this virtual datacenter.'))
    SUPPORT_PHONE = s.CharField(label='SUPPORT_PHONE', max_length=255, required=False,
                                help_text=_('Phone number displayed in the support contact details.'))
    SUPPORT_USER_CONFIRMATION = s.BooleanField(label='SUPPORT_USER_CONFIRMATION',
                                               help_text=_('Whether to send a confirmation email to the user after '
                                                           'a support ticket has been sent to SUPPORT_EMAIL.'))
    DEFAULT_FROM_EMAIL = s.EmailField(label='DEFAULT_FROM_EMAIL', max_length=255,
                                      help_text=_('Email address used as the "From" address for all outgoing emails '
                                                  'related to this virtual datacenter.'))
    EMAIL_ENABLED = s.BooleanField(label='EMAIL_ENABLED',
                                   help_text=_('Whether to completely disable sending of emails '
                                               'related to this virtual datacenter.'))
    API_LOG_USER_CALLBACK = s.BooleanField(label='API_LOG_USER_CALLBACK',
                                           help_text=_('Whether to log API user callback requests into the tasklog.'))
    VMS_ZONE_ENABLED = s.BooleanField(label='VMS_ZONE_ENABLED',  # Module
                                      help_text=_('Whether to enable support for SunOS and Linux zones in '
                                                  'this virtual datacenter.'))
    VMS_VM_DEFINE_LIMIT = s.IntegerField(label='VMS_VM_DEFINE_LIMIT', required=False,
                                         help_text=_('Maximum number of virtual servers that can be defined in '
                                                     'this virtual datacenter.'))
    VMS_VM_CPU_CAP_REQUIRED = s.BooleanField(label='VMS_VM_CPU_CAP_REQUIRED',
                                             help_text='When disabled, the vCPUs server parameter on SunOS and Linux '
                                                       'Zones can be set to 0, which removes the compute node CPU '
                                                       'limit (cpu_cap) for the virtual server.')
    VMS_VM_STOP_TIMEOUT_DEFAULT = s.IntegerField(label='VMS_VM_STOP_TIMEOUT_DEFAULT',
                                                 help_text='Default time period (in seconds) for a graceful VM stop or '
                                                           'reboot, after which a force stop/reboot is send to the VM '
                                                           '(KVM only).')
    VMS_VM_STOP_WIN_TIMEOUT_DEFAULT = s.IntegerField(label='VMS_VM_STOP_WIN_TIMEOUT_DEFAULT',
                                                     help_text='This is the same setting as VMS_VM_STOP_TIMEOUT_DEFAULT'
                                                               ' but for a VM with Windows OS type, which usually takes'
                                                               ' longer to shutdown.')
    VMS_VM_OSTYPE_DEFAULT = s.IntegerChoiceField(label='VMS_VM_OSTYPE_DEFAULT', choices=Vm.OSTYPE,
                                                 help_text=_('Default operating system type. One of: 1 - Linux VM, '
                                                             '2 - SunOS VM, 3 - BSD VM, 4 - Windows VM, '
                                                             '5 - SunOS Zone, 6 - Linux Zone.'))
    VMS_VM_MONITORED_DEFAULT = s.BooleanField(label='VMS_VM_MONITORED_DEFAULT',
                                              help_text=_('Controls whether server synchronization with the monitoring '
                                                          'system is enabled by default.'))
    VMS_VM_CPU_SHARES_DEFAULT = s.IntegerField(label='VMS_VM_CPU_SHARES_DEFAULT', min_value=0, max_value=1048576,
                                               help_text=_("Default value of the server's CPU shares, "
                                                           "relative to other servers."))
    VMS_VM_ZFS_IO_PRIORITY_DEFAULT = s.IntegerField(label='VMS_VM_ZFS_IO_PRIORITY_DEFAULT', min_value=0, max_value=1024,
                                                    help_text=_("Default value of the server's IO throttling "
                                                                "priority, relative to other servers."))
    VMS_VM_RESOLVERS_DEFAULT = s.IPAddressArrayField(label='VMS_VM_RESOLVERS_DEFAULT', max_items=8,
                                                     help_text=_('Default DNS resolvers used for newly '
                                                                 'created servers.'))
    VMS_VM_SSH_KEYS_DEFAULT = s.ArrayField(label='VMS_VM_SSH_KEYS_DEFAULT', max_items=32, required=False,
                                           help_text=_('List of public SSH keys added to every virtual machine '
                                                       'in this virtual datacenter.'))
    VMS_VM_MDATA_DEFAULT = s.MetadataField(label='VMS_VM_MDATA_DEFAULT', required=False,
                                           validators=(validate_mdata(Vm.RESERVED_MDATA_KEYS),),
                                           help_text=_('Default VM metadata (key=value string pairs).'))
    VMS_DISK_MODEL_DEFAULT = s.ChoiceField(label='VMS_DISK_MODEL_DEFAULT', choices=Vm.DISK_MODEL,
                                           help_text=_('Default disk model of newly created server disks. One of: '
                                                       'virtio, ide, scsi.'))
    VMS_DISK_COMPRESSION_DEFAULT = s.ChoiceField(label='VMS_DISK_COMPRESSION_DEFAULT', choices=Vm.DISK_COMPRESSION,
                                                 help_text=_('Default disk compression algorithm. '
                                                             'One of: off, lzjb, gzip, gzip-N, zle, lz4.'))
    VMS_DISK_IMAGE_DEFAULT = s.CharField(label='VMS_DISK_IMAGE_DEFAULT', max_length=64, required=False,
                                         help_text=_('Name of the default disk image used for '
                                                     'newly created server disks.'))
    VMS_DISK_IMAGE_ZONE_DEFAULT = s.CharField(label='VMS_DISK_IMAGE_ZONE_DEFAULT', max_length=64, required=False,
                                              help_text=_('Name of the default disk image used for '
                                                          'newly created SunOS zone servers.'))
    VMS_DISK_IMAGE_LX_ZONE_DEFAULT = s.CharField(label='VMS_DISK_IMAGE_LX_ZONE_DEFAULT', max_length=64, required=False,
                                                 help_text=_('Name of the default disk image used for '
                                                             'newly created Linux zone servers.'))
    VMS_NIC_MODEL_DEFAULT = s.ChoiceField(label='VMS_NIC_MODEL_DEFAULT', choices=Vm.NIC_MODEL,
                                          help_text=_('Default virtual NIC model of newly created server NICs. '
                                                      'One of: virtio, e1000, rtl8139.'))
    VMS_NIC_MONITORING_DEFAULT = s.IntegerField(label='VMS_NIC_MONITORING_DEFAULT',
                                                min_value=NIC_ID_MIN, max_value=NIC_ID_MAX,
                                                help_text=_('Default NIC ID, which will be used for '
                                                            'external monitoring.'))
    VMS_NET_DEFAULT = s.CharField(label='VMS_NET_DEFAULT', max_length=64, required=False,
                                  help_text=_('Name of the default network used for newly created server NICs.'))
    VMS_NET_LIMIT = s.IntegerField(label='VMS_NET_LIMIT', required=False,
                                   help_text=_('Maximum number of DC-bound networks that can be created in '
                                               'this virtual datacenter.'))
    VMS_NET_VLAN_RESTRICT = s.BooleanField(label='VMS_NET_VLAN_RESTRICT',
                                           help_text=_('Whether to restrict VLAN IDs to the '
                                                       'VMS_NET_VLAN_ALLOWED list.'))
    VMS_NET_VLAN_ALLOWED = s.IntegerArrayField(label='VMS_NET_VLAN_ALLOWED', required=False,
                                               help_text=_('List of VLAN IDs available for newly created DC-bound '
                                                           'networks in this virtual datacenter.'))
    VMS_NET_VXLAN_RESTRICT = s.BooleanField(label='VMS_NET_VXLAN_RESTRICT',
                                            help_text=_('Whether to restrict VXLAN IDs to the '
                                                        'VMS_NET_VXLAN_ALLOWED list.'))
    VMS_NET_VXLAN_ALLOWED = s.IntegerArrayField(label='VMS_NET_VXLAN_ALLOWED', required=False,
                                                help_text=_('List of VXLAN IDs available for newly created DC-bound '
                                                            'networks in this virtual datacenter.'))
    VMS_IMAGE_LIMIT = s.IntegerField(label='VMS_IMAGE_LIMIT', required=False,
                                     help_text=_('Maximum number of DC-bound server images that can be created in '
                                                 'this virtual datacenter.'))
    VMS_ISO_LIMIT = s.IntegerField(label='VMS_ISO_LIMIT', required=False,
                                   help_text=_('Maximum number of DC-bound ISO images that can be created in '
                                               'this virtual datacenter.'))
    VMS_STORAGE_DEFAULT = s.CharField(label='VMS_STORAGE_DEFAULT', max_length=64, required=False,
                                      help_text=_('Name of the default storage used for newly created servers '
                                                  'and server disks.'))
    VMS_VGA_MODEL_DEFAULT = s.ChoiceField(label='VMS_VGA_MODEL_DEFAULT', choices=Vm.VGA_MODEL,
                                          help_text=_('Default VGA emulation driver of newly created servers. '
                                                      'One of: std, cirrus, vmware.'))
    VMS_VM_SNAPSHOT_DEFINE_LIMIT = s.IntegerField(label='VMS_VM_SNAPSHOT_DEFINE_LIMIT', required=False,
                                                  help_text=_('Maximum number of snapshot definitions per server.'))
    VMS_VM_SNAPSHOT_LIMIT_AUTO = s.IntegerField(label='VMS_VM_SNAPSHOT_LIMIT_AUTO', required=False,
                                                help_text=_('Maximum number of automatic snapshots per server.'))
    VMS_VM_SNAPSHOT_LIMIT_MANUAL = s.IntegerField(label='VMS_VM_SNAPSHOT_LIMIT_MANUAL', required=False,
                                                  help_text=_('Maximum number of manual snapshots per server.'))
    VMS_VM_SNAPSHOT_LIMIT_MANUAL_DEFAULT = s.IntegerField(label='VMS_VM_SNAPSHOT_LIMIT_MANUAL_DEFAULT', required=False,
                                                          help_text=_('Predefined manual snapshot limit '
                                                                      'for new servers.'))
    VMS_VM_SNAPSHOT_SIZE_LIMIT = s.IntegerField(label='VMS_VM_SNAPSHOT_SIZE_LIMIT', required=False,
                                                help_text=_('Maximum size (MB) of all snapshots per server.'))
    VMS_VM_SNAPSHOT_SIZE_LIMIT_DEFAULT = s.IntegerField(label='VMS_VM_SNAPSHOT_SIZE_LIMIT_DEFAULT', required=False,
                                                        help_text=_('Predefined snapshot size limit (MB) for new '
                                                                    'servers.'))
    VMS_VM_SNAPSHOT_DC_SIZE_LIMIT = s.IntegerField(label='VMS_VM_SNAPSHOT_DC_SIZE_LIMIT', required=False,
                                                   help_text=_('Maximum size (MB) of all snapshots in this '
                                                               'virtual datacenter.'))
    VMS_VM_BACKUP_DEFINE_LIMIT = s.IntegerField(label='VMS_VM_BACKUP_DEFINE_LIMIT', required=False,
                                                help_text=_('Maximum number of backup definitions per server.'))
    VMS_VM_BACKUP_LIMIT = s.IntegerField(label='VMS_VM_BACKUP_LIMIT', required=False,
                                         help_text=_('Upper retention limit used for new backup definitions.'))
    VMS_VM_BACKUP_DC_SIZE_LIMIT = s.IntegerField(label='VMS_VM_BACKUP_DC_SIZE_LIMIT', required=False,
                                                 help_text=_('Maximum size (MB) of all backups in this '
                                                             'virtual datacenter.'))
    VMS_VM_BACKUP_COMPRESSION_DEFAULT = s.ChoiceField(label='VMS_VM_BACKUP_COMPRESSION_DEFAULT',
                                                      choices=BackupDefine.COMPRESSION,
                                                      help_text=_('Predefined compression algorithm for '
                                                                  'new file backups.'))
    DNS_PTR_DEFAULT = s.CharField(label='DNS_PTR_DEFAULT', max_length=255, min_length=4,
                                  help_text=_("Default value used for reverse DNS records of virtual server "
                                              "NIC's IP addresses. Available placeholders are: "
                                              "{ipaddr}, {hostname}, {alias}."))
    MON_ZABBIX_SERVER = s.RegexField(r'^https?://.*$', label='MON_ZABBIX_SERVER', max_length=1024,
                                     help_text=_('URL address of Zabbix server used for external monitoring of servers '
                                                 'in this virtual datacenter. WARNING: Changing this and other '
                                                 'MON_ZABBIX_* values in default virtual datacenter will '
                                                 'affect the built-in internal monitoring of servers and '
                                                 'compute nodes.'))
    MON_ZABBIX_SERVER_SSL_VERIFY = s.BooleanField(label='MON_ZABBIX_SERVER_SSL_VERIFY',
                                                  help_text=_('Whether to perform HTTPS certificate verification when '
                                                              'connecting to the Zabbix API.'))
    MON_ZABBIX_TIMEOUT = s.IntegerField(label='MON_ZABBIX_TIMEOUT', min_value=1, max_value=180,
                                        help_text=_('Timeout in seconds used for connections to the Zabbix API.'))
    MON_ZABBIX_USERNAME = s.CharField(label='MON_ZABBIX_USERNAME', max_length=255,
                                      help_text=_('Username used for connecting to the Zabbix API.'))
    MON_ZABBIX_PASSWORD = s.CharField(label='MON_ZABBIX_PASSWORD', max_length=255,
                                      help_text=_('Password used for connecting to the Zabbix API.'))
    MON_ZABBIX_HTTP_USERNAME = s.CharField(label='MON_ZABBIX_HTTP_USERNAME', max_length=255, required=False,
                                           help_text=_('Username used for the HTTP basic authentication required for '
                                                       'connections to the Zabbix API.'))
    MON_ZABBIX_HTTP_PASSWORD = s.CharField(label='MON_ZABBIX_HTTP_PASSWORD', max_length=255, required=False,
                                           help_text=_('Password used for the HTTP basic authentication required for '
                                                       'connections to the Zabbix API.'))
    MON_ZABBIX_VM_SLA = s.BooleanField(label='MON_ZABBIX_VM_SLA',
                                       help_text=_('Whether to fetch and display the SLA value of virtual servers.'))
    MON_ZABBIX_VM_SYNC = s.BooleanField(label='MON_ZABBIX_VM_SYNC',
                                        help_text=_('Whether newly created virtual servers can be automatically '
                                                    'synchronized with the monitoring server.'))
    MON_ZABBIX_HOSTGROUP_VM = s.SafeCharField(label='MON_ZABBIX_HOSTGROUP_VM', max_length=255,
                                              help_text=_('Existing Zabbix host group, which will be used for all '
                                                          'monitored servers in this virtual datacenter.'))
    MON_ZABBIX_HOSTGROUPS_VM = s.ArrayField(label='MON_ZABBIX_HOSTGROUPS_VM', max_items=32, required=False,
                                            help_text=_('List of Zabbix host groups, which will be used '
                                                        'for all monitored servers in this virtual datacenter. '
                                                        'Available placeholders are: {ostype}, {ostype_text}, '
                                                        '{disk_image}, {disk_image_abbr}, {dc_name}.'))
    MON_ZABBIX_HOSTGROUPS_VM_RESTRICT = s.BooleanField(label='MON_ZABBIX_HOSTGROUPS_VM_RESTRICT',
                                                       help_text=_('Whether to restrict Zabbix host group names to the '
                                                                   'MON_ZABBIX_HOSTGROUPS_VM_ALLOWED list.'))
    MON_ZABBIX_HOSTGROUPS_VM_ALLOWED = s.ArrayField(label='MON_ZABBIX_HOSTGROUPS_VM_ALLOWED', max_items=32,
                                                    required=False,
                                                    help_text=_('List of Zabbix host groups that can be used by servers'
                                                                ' in this virtual datacenter. Available placeholders'
                                                                ' are: {ostype}, {ostype_text}, {disk_image},'
                                                                ' {disk_image_abbr}, {dc_name}.'))
    MON_ZABBIX_TEMPLATES_VM = s.ArrayField(label='MON_ZABBIX_TEMPLATES_VM', max_items=128, required=False,
                                           help_text=_('List of existing Zabbix templates, which will be used for all '
                                                       'monitored servers in this virtual datacenter. '
                                                       'Available placeholders are: {ostype}, {ostype_text}, '
                                                       '{disk_image}, {disk_image_abbr}, {dc_name}.'))
    MON_ZABBIX_TEMPLATES_VM_MAP_TO_TAGS = s.BooleanField(label='MON_ZABBIX_TEMPLATES_VM_MAP_TO_TAGS',
                                                         help_text=_('Whether to find and use existing Zabbix templates'
                                                                     ' according to tags of a monitored '
                                                                     'virtual server.'))
    MON_ZABBIX_TEMPLATES_VM_RESTRICT = s.BooleanField(label='MON_ZABBIX_TEMPLATES_VM_RESTRICT',
                                                      help_text=_('Whether to restrict Zabbix template names to the '
                                                                  'MON_ZABBIX_TEMPLATES_VM_ALLOWED list.'))
    MON_ZABBIX_TEMPLATES_VM_ALLOWED = s.ArrayField(label='MON_ZABBIX_TEMPLATES_VM_ALLOWED', max_items=128,
                                                   required=False,
                                                   help_text=_('List of Zabbix templates that can be used by servers '
                                                               'in this virtual datacenter. Available placeholders are:'
                                                               ' {ostype}, {ostype_text}, {disk_image},'
                                                               ' {disk_image_abbr}, {dc_name}.'))
    MON_ZABBIX_TEMPLATES_VM_NIC = s.ArrayField(label='MON_ZABBIX_TEMPLATES_VM_NIC', max_items=16, required=False,
                                               help_text=_('List of Zabbix templates that will be used for all '
                                                           'monitored servers, for every virtual NIC of a server. '
                                                           'Available placeholders are: {net}, {nic_id} + '
                                                           'MON_ZABBIX_TEMPLATES_VM placeholders.'))
    MON_ZABBIX_TEMPLATES_VM_DISK = s.ArrayField(label='MON_ZABBIX_TEMPLATES_VM_DISK', max_items=16, required=False,
                                                help_text=_('List of Zabbix templates that will be used for all '
                                                            'monitored servers, for every virtual disk of a server. '
                                                            'Available placeholders: {disk}, {disk_id} + '
                                                            'MON_ZABBIX_TEMPLATES_VM placeholders.'))
    MON_ZABBIX_HOST_VM_PROXY = s.CharField(label='MON_ZABBIX_HOST_VM_PROXY', min_length=1, max_length=128,
                                           required=False,
                                           help_text=_('Name or ID of the monitoring proxy, which will be used to '
                                                       'monitor all monitored virtual servers.'))

    def __init__(self, request, dc, *args, **kwargs):
        # noinspection PyNoneFunctionAssignment
        global_settings = self.get_global_settings()

        if global_settings and not dc.is_default():
            # Displaying global settings for non default DC
            dc1_settings = DefaultDc().settings
            # These setting should be read-only and read from default DC
            dc_settings = DefAttrDict(dc.custom_settings, defaults=dc1_settings)  # instance
        else:
            dc1_settings = None
            dc_settings = dc.settings  # instance

        self.dc_settings = dc_settings
        dc_settings['dc'] = dc.name
        super(DcSettingsSerializer, self).__init__(request, dc_settings, *args, **kwargs)
        # Everything except the read-only "dc" name is updatable.
        # FIX: a list comprehension works on both Python 2 and 3 - dict views
        # returned by keys() in Python 3 have no remove() method.
        self._update_fields_ = [field_name for field_name in self.fields if field_name != 'dc']
        self.settings = {}
        self.dc = dc

        if dc1_settings is not None:
            # Global settings can only be modified via the default DC
            for i in global_settings:
                self.fields[i].read_only = True

    @classmethod
    def get_global_settings(cls):
        """Return (and cache on the class) the set of settings declared by a
        subclass on top of DcSettingsSerializer."""
        if cls._global_settings is None:
            # noinspection PyUnresolvedReferences
            cls._global_settings = frozenset(set(cls.base_fields.keys()) -
                                             set(DcSettingsSerializer.base_fields.keys()))
        return cls._global_settings

    @staticmethod
    def _filter_sensitive_data(dictionary):
        """Replace sensitive data in input dict with ***"""
        # Iterate over a snapshot of the keys; a generator expression avoids
        # building a throwaway list for every key.
        for key in list(dictionary):
            if any(i in key for i in SENSITIVE_FIELD_NAMES):
                dictionary[key] = SENSITIVE_FIELD_VALUE

        return dictionary

    def _setattr(self, instance, source, value):
        # Record every changed setting so callers can inspect what was updated
        # noinspection PyProtectedMember
        super(DcSettingsSerializer, self)._setattr(instance, source, value)
        self.settings[source] = value

    def detail_dict(self, **kwargs):
        # Remove sensitive data from detail dict
        return self._filter_sensitive_data(super(DcSettingsSerializer, self).detail_dict(**kwargs))

    @property
    def data(self):
        if self._data is None:
            # Remove sensitive data from output
            self._data = self._filter_sensitive_data(super(DcSettingsSerializer, self).data)
        return self._data

    # noinspection PyPep8Naming
    def validate_VMS_VM_DOMAIN_DEFAULT(self, attrs, source):
        """The default VM domain must exist and be attached to this DC (when DNS is enabled)."""
        if self.dc_settings.DNS_ENABLED:
            try:
                value = attrs[source]
            except KeyError:
                pass
            else:
                try:
                    domain = Domain.objects.get(name=value)
                except Domain.DoesNotExist:
                    raise s.ValidationError(_('Object with name=%s does not exist.') % value)
                else:
                    if not self.dc.domaindc_set.filter(domain_id=domain.id).exists():
                        raise s.ValidationError(_('Domain is not available in this datacenter.'))

        return attrs

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_VMS_VM_SSH_KEYS_DEFAULT(self, attrs, source):
        """Every item must be a valid public SSH key."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            for key in value:
                validate_ssh_key(key)

        return attrs

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_DNS_PTR_DEFAULT(self, attrs, source):
        """The PTR template must render to a valid DNS name after placeholder substitution."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            testvalue = placeholder_validator(value, ipaddr='test', hostname='test', alias='test')
            RegexValidator(r'^[a-z0-9][a-z0-9\.-]+[a-z0-9]$')(testvalue)

        return attrs

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_HOSTGROUPS_VM(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS)

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_HOSTGROUPS_VM_ALLOWED(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS)

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_TEMPLATES_VM(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS)

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_TEMPLATES_VM_ALLOWED(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS)

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_TEMPLATES_VM_NIC(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS_NIC)

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_MON_ZABBIX_TEMPLATES_VM_DISK(self, attrs, source):
        return validate_array_placeholders(attrs, source, VM_KWARGS_DISK)

    def validate(self, attrs):
        # Check if it is possible to override a boolean setting
        # FIX: iterate over a snapshot because keys may be deleted from attrs
        # (deleting while iterating items() raises RuntimeError on Python 3)
        for source, value in list(attrs.items()):
            if source in self._override_disabled_ and not getattr(settings, source, False) and value:
                self._errors[source] = s.ErrorList([_('Cannot override global setting.')])
                del attrs[source]

        return attrs
class ImageSerializer(s.InstanceSerializer):
    """
    vms.models.Image
    Also used in api.dc.image.serializers.
    """
    _backup_attrs_map_ = {
        'owner': 'owner_id',
        'dc_bound': 'dc_bound_id',
    }
    _model_ = Image
    _update_fields_ = ('alias', 'version', 'dc_bound', 'owner', 'access', 'desc', 'resize', 'deploy',
                       'tags')  # TODO: 'nic_model', 'disk_model'
    _default_fields_ = ('name', 'alias', 'owner')

    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=32)
    uuid = s.CharField(read_only=True)
    alias = s.SafeCharField(max_length=32)
    version = s.SafeCharField(max_length=16, default='1.0')
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects)
    access = s.IntegerChoiceField(choices=Image.ACCESS, default=Image.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    ostype = s.IntegerChoiceField(choices=Image.OSTYPE, read_only=True)
    size = s.IntegerField(read_only=True)
    resize = s.BooleanField(default=False)
    deploy = s.BooleanField(default=False)
    # nic_model = s.ChoiceField(choices=Vm.NIC_MODEL)  # KVM only
    # disk_model = s.ChoiceField(choices=Vm.DISK_MODEL)  # KVM only
    tags = s.TagField(required=False, default=[])
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    status = s.IntegerChoiceField(choices=Image.STATUS, read_only=True, required=False)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, img, *args, **kwargs):
        super(ImageSerializer, self).__init__(request, img, *args, **kwargs)

        if kwargs.get('many', False):
            return  # Listing mode -> no per-object bookkeeping required

        self.update_manifest = True
        self._dc_bound = img.dc_bound
        self.fields['owner'].queryset = get_owners(request, all=True)

    def create_img_backup(self):
        """Creates a dictionary that maps Image object attributes to its values;
        this will be used as a backup in case the update should fail"""
        attr_map = self._backup_attrs_map_
        backup = {}

        for attr in self._update_fields_:
            model_attr = attr_map.get(attr, attr)
            backup[model_attr] = getattr(self.object, model_attr)

        return backup

    def _normalize(self, attr, value):
        # dc_bound is normalized against the cached Dc object, not the raw bool
        if attr == 'dc_bound':
            return self._dc_bound
        # noinspection PyProtectedMember
        return super(ImageSerializer, self)._normalize(attr, value)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Image'))
        return attrs

    def validate_dc_bound(self, attrs, source):
        if source in attrs:
            requested = bool(attrs[source])

            if requested != self.object.dc_bound_bool:
                self._dc_bound = validate_dc_bound(self.request, self.object, requested, _('Image'))

        return attrs

    def validate(self, attrs):
        # The image manifest does not have to be regenerated when only
        # DB-level attributes are being changed
        if {'dc_bound', 'dc_bound_bool', 'owner'}.issuperset(attrs.keys()):
            self.update_manifest = False

        alias = attrs.get('alias', self.object.alias)
        version = attrs.get('version', self.object.version)

        existing = Image.objects
        if self.object.pk:
            existing = existing.exclude(pk=self.object.pk)

        # The (alias, version) pair must be unique (alias is case-insensitive)
        if existing.filter(alias__iexact=alias, version=version).exists():
            self._errors['alias'] = s.ErrorList([_('This alias is already in use. '
                                                   'Please supply a different alias or version.')])

        if self.request.method == 'POST' and self._dc_bound:
            limit = self._dc_bound.settings.VMS_IMAGE_LIMIT

            if limit is not None and Image.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                raise s.ValidationError(_('Maximum number of server disk images reached'))

        return attrs
class VmReplicaSerializer(s.InstanceSerializer):
    """Serializer for a VM replica (vms.models.SlaveVm) - used by POST/PUT vm_replica."""
    _model_ = SlaveVm
    _default_fields_ = ('repname', )
    _update_fields_ = ('reserve_resources', 'sleep_time', 'enabled', 'bwlimit')

    hostname = s.CharField(source='master_vm.hostname', read_only=True)
    repname = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', source='name', max_length=24, min_length=1)
    node = s.SlugRelatedField(slug_field='hostname', queryset=Node.objects, required=True)  # Updated only by POST
    root_zpool = s.CharField(max_length=64, required=False)  # Updated only by POST
    disk_zpools = DiskPoolDictField(required=False)  # Updated only by POST
    reserve_resources = s.BooleanField(default=True)  # Default value changed below during POST
    sleep_time = s.IntegerField(source='rep_sleep_time', min_value=0, max_value=86400, default=60)
    enabled = s.BooleanField(source='rep_enabled', default=True)
    bwlimit = s.IntegerField(source='rep_bwlimit', required=False, min_value=0, max_value=2147483647)
    last_sync = s.DateTimeField(read_only=True, required=False)
    reinit_required = s.BooleanField(source='rep_reinit_required', read_only=True, required=False)
    node_status = s.DisplayChoiceField(source='vm.node.status', choices=Node.STATUS_DB, read_only=True)
    created = s.DateTimeField(source="vm.created", read_only=True, required=False)

    def __init__(self, request, slave_vm, *args, **kwargs):
        # State consumed by validation/saving helpers below.
        self.img_required = None
        self.reserve_resources_changed = False
        self._detail_dict = {}
        super(VmReplicaSerializer, self).__init__(request, slave_vm, *args, **kwargs)

        if request.method == 'POST':
            vm = slave_vm.vm
            dc_settings = request.dc.settings
            self.fields['reserve_resources'].default = dc_settings.VMS_VM_REPLICA_RESERVATION_DEFAULT
            self.fields['node'].queryset = get_nodes(request, is_compute=True)
            self._disks = vm.json_get_disks()

            # disk_zpools only makes sense for HVM guests; one entry per disk.
            if vm.is_hvm():
                self.fields['disk_zpools'].max_items = len(self._disks)
            else:
                del self.fields['disk_zpools']
        else:
            # PUT: placement-related fields cannot be changed after creation.
            self.fields['node'].required = False
            self.fields['node'].read_only = True
            self.fields['root_zpool'].read_only = True
            self.fields['disk_zpools'].read_only = True

    def validate_disk_zpools(self, attrs, source):
        """Basic disk_zpools validation (POST only)"""
        disk_zpools = attrs.get(source, None)

        if disk_zpools:
            # Keys are 1-based disk IDs (see DiskPoolDictField); they must not
            # exceed the number of disks on the master VM.
            if max(disk_zpools.keys()) > len(self._disks):
                raise s.ValidationError(_('Invalid disk_id.'))

        return attrs

    def validate_node(self, attrs, source):
        """Basic node validation (POST only)"""
        try:
            node = attrs[source]
        except KeyError:
            return attrs

        if node == self.object.node:
            raise s.ValidationError(_('Target node is the same as current node.'))

        if node.status != Node.ONLINE:
            raise s.ValidationError(_('Target node is not in online state.'))

        # Check nic tags
        try:
            validate_nic_tags(self.object.vm, new_node=node)
        except s.ValidationError:
            raise s.ValidationError(_('Some networks are not available on target node.'))

        return attrs

    def _validate_create(self, attrs):
        """Validate node storage zpools, resources, ... and create slave VM (POST only)"""
        node = attrs['node']
        self._detail_dict['node'] = node.hostname
        slave_vm = self.object
        slave_vm.set_rep_hostname()
        slave_vm.node = node
        slave_vm.reserve_resources = attrs.get('reserve_resources', True)
        slave_vm_define = SlaveVmDefine(slave_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        try:
            root_zpool = slave_vm_define.save_root_zpool(root_zpool)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        else:
            if root_zpool:
                self._detail_dict['root_zpool'] = root_zpool

        # Validate disk_zpools (we can do this after we know the new node)
        if slave_vm.vm.is_hvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = slave_vm_define.save_disk_zpools(disk_zpools)
            except s.APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return False
            else:
                if disk_zpools:
                    self._detail_dict['disk_zpools'] = disk_zpools

        # Validate dc_node resources
        try:
            slave_vm_define.validate_node_resources(ignore_cpu_ram=not slave_vm.reserve_resources)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate storage resources
        try:
            slave_vm_define.validate_storage_resources()
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate images
        self.img_required = slave_vm_define.check_required_images()

        # noinspection PyAttributeOutsideInit
        self.slave_vm_define = slave_vm_define  # Consumed later by save_slave_vm()
        return True

    def _validate_update(self, attrs):
        """Validate node resources if reserve_resources changed to True"""
        try:
            reserve_resource = attrs['reserve_resources']
        except KeyError:
            pass
        else:
            # We need to know whether the user requested change of the reserve_resources attribute
            self.reserve_resources_changed = reserve_resource != self.object.reserve_resources

            # Only re-check CPU/RAM when the reservation is being turned on.
            if self.reserve_resources_changed and reserve_resource:
                slave_vm_define = SlaveVmDefine(self.object)

                try:
                    slave_vm_define.validate_node_resources(ignore_cpu_ram=False, ignore_disk=True)
                except s.APIValidationError as exc:
                    self._errors['node'] = exc.api_errors
                    return False

        return True

    def validate(self, attrs):
        """Common checks plus POST (create) or PUT (update) specific validation."""
        if self.object.rep_reinit_required:
            raise s.ValidationError(_('Server replica requires re-initialization.'))

        if self.request.method == 'POST':
            # rep_id is derived from the count of existing (named) replicas of this master VM.
            total = SlaveVm.objects.filter(master_vm=self.object.master_vm).exclude(name=u'').count()
            self.object.rep_id = total + 1
            limit = self.request.dc.settings.VMS_VM_REPLICA_LIMIT

            if limit is not None:
                if int(limit) <= total:
                    raise s.ValidationError(_('Maximum number of server replicas reached.'))

            self._validate_create(attrs)
        else:  # PUT
            self._validate_update(attrs)

        return attrs

    def save_slave_vm(self):
        """Initial saving of slave VM - used only by POST vm_replica"""
        # The only difference between a slave and master VM should be the hostname
        # hence we change the slave hostname temporarily to the real hostname for the purpose of sync_json()
        slave_vm = self.object
        hostname = slave_vm.vm.hostname
        slave_vm.vm.hostname = slave_vm.master_vm.hostname
        slave_vm.vm.choose_vnc_port()
        slave_vm.vm.sync_json()
        slave_vm.vm.hostname = hostname
        # We also don't want to save the replication state (which can be only updated by vm_replica_cb)
        sync_status = slave_vm.sync_status
        slave_vm.sync_status = SlaveVm.DIS
        self.slave_vm_define.save()
        slave_vm.sync_status = sync_status

        return self.slave_vm_define.slave_vm

    def node_image_import(self):
        """Start an image import on the target node if check_required_images() found one missing."""
        if self.img_required:
            ns, img = self.img_required
            return NodeImageView.import_for_vm(self.request, ns, img, self.object)

        return None

    def detail_dict(self, **kwargs):
        """Return task-log detail dict enriched with create-time placement info."""
        # noinspection PyStatementEffect
        self.data  # Force serialization so detail_dict() sees validated data
        dd = super(VmReplicaSerializer, self).detail_dict(**kwargs)
        dd.update(self._detail_dict)
        dd['repname'] = self.object.name

        return dd
class NodeDefineSerializer(s.InstanceSerializer):
    """
    vms.models.Node
    """
    error_negative_resources = s.ErrorList([_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'address', 'is_compute', 'is_backup', 'note',
                       'cpu_coef', 'ram_coef', 'monitoring_hostgroups', 'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.ChoiceField()  # Choices are filled in __init__ from the node's IP addresses
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status', choices=Node.STATUS_DB, read_only=True)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    note = s.CharField(required=False)
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    # NOTE(review): cpu_coef has no max_value while ram_coef is capped at 1 - confirm this asymmetry is intended.
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0, max_value=1, max_digits=4, decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(max_items=16, default=[],
                                         validators=(RegexValidator(regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS),))
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args, **kwargs)
        # Flags set by the validators below and consumed by save() / the view.
        self.clear_cache = False
        self.status_changed = False
        self.address_changed = False
        self.old_ip_address = None
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Valid node IP addresses
            self.fields['address'].choices = [(ip, ip) for ip in instance.ips]
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        # Module-level validate_owner() helper (not recursion).
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))

        return attrs

    def validate_address(self, attrs, source):
        """Mark that node IP address is going to change"""
        new_address = attrs.get(source, None)

        if new_address and self.object.address != new_address:
            self.address_changed = True
            # Remember the current IPAddress object (if any) so it can be released/updated later.
            try:
                self.old_ip_address = self.object.ip_address
            except ObjectDoesNotExist:
                self.old_ip_address = None

        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(_('Cannot change status. Please add a valid license first.'))

            if node.is_unreachable() or node.is_offline():  # Manual switch from unreachable and offline state
                if settings.DEBUG:
                    logger.warning('DEBUG mode on => skipping status checking of node %s', self.object)
                elif not node_ping(self.object, all_workers=False):  # requires that node is really online
                    raise s.ValidationError(_('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            # NOTE: holds the new status value (truthy), not a plain boolean.
            self.status_changed = value

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))

            self.clear_cache = True

        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(_('Found existing VM backup definitions.'))

            self.clear_cache = True

            # Check existing backups when removing node
            if self.request.method == 'DELETE':
                if self.object.backup_set.exists():
                    raise s.ValidationError(_('Found existing VM backups.'))

                self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True

        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True

        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        # _cpu_coef/_ram_coef hold the pre-update values captured in __init__.
        return not (self.object.cpu_coef == self._cpu_coef and self.object.ram_coef == self._ram_coef)

    def save(self):
        """Update compute node attributes in database.

        Returns a field->errors dict when the new coefficients would produce
        negative free resources, None on success; re-raises any unrelated
        IntegrityError."""
        node = self.object

        # NOTE:
        # Changing cpu or disk coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free
        try:
            with transaction.atomic():
                node.save(update_resources=self.update_node_resources, clear_cache=self.clear_cache)

                if self.update_node_resources:
                    # Raising inside the atomic block rolls the whole save back.
                    if node.cpu_free < 0 or node.dcnode_set.filter(cpu_free__lt=0).exists():
                        raise IntegrityError('cpu_check')

                    if node.ram_free < 0 or node.dcnode_set.filter(ram_free__lt=0).exists():
                        raise IntegrityError('ram_check')
        except IntegrityError as exc:
            errors = {}
            exc_error = str(exc)
            # ram or cpu constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
            # OR an exception was raised above
            if 'ram_check' in exc_error:
                errors['ram_coef'] = self.error_negative_resources
            if 'cpu_check' in exc_error:
                errors['cpu_coef'] = self.error_negative_resources

            if not errors:
                raise exc

            return errors

        if self.update_node_resources:  # cpu_free or ram_free changed
            self.reload()

        return None
class DiskPoolDictField(s.BaseDictField):
    """Dict field mapping a 1-based disk ID (key) to a zpool name (value)."""
    # Disk IDs are shifted by one relative to the internal DISK_ID_MIN/MAX range.
    _key_field = s.IntegerField(min_value=DISK_ID_MIN + 1, max_value=DISK_ID_MAX + 1)
    _val_field = s.CharField(max_length=64)
class DefaultDcSettingsSerializer(DcSettingsSerializer):
    """
    vms.models.DefaultDc.settings
    """
    _global_settings = None
    default_dc_third_party_modules = []  # Class level storage, updated only with the decorator function
    default_dc_third_party_settings = []  # Class level storage, updated only with the decorator function

    # Global modules
    ACL_ENABLED = s.BooleanField(label=_('Advanced User Management'))  # Global Module
    API_ENABLED = s.BooleanField(label=_('API access'))  # Global Module
    VMS_DC_ENABLED = s.BooleanField(label=_('Virtual Datacenters'))  # Global Module
    SMS_ENABLED = s.BooleanField(label=_('SMS'))  # Global Module

    # Compute node SSH keys
    VMS_NODE_SSH_KEYS_SYNC = s.BooleanField(label='VMS_NODE_SSH_KEYS_SYNC',
                                            help_text=_('WARNING: Do not disable this unless '
                                                        'you know what you are doing!'))
    VMS_NODE_SSH_KEYS_DEFAULT = s.ArrayField(label='VMS_NODE_SSH_KEYS_DEFAULT',
                                             help_text=_('List of SSH keys to be added to compute nodes by default'))

    # Image server / IMGAPI sources
    VMS_IMAGE_VM = s.SafeCharField(label='VMS_IMAGE_VM', required=False,
                                   help_text=_('Global image server (hostname or uuid) - primary IMGAPI source on all '
                                               'compute nodes. Empty value means that most of the image-related '
                                               'operations will be performed only in the DB.'))
    VMS_IMAGE_VM_NIC = s.IntegerField(label='VMS_IMAGE_VM_NIC', min_value=NIC_ID_MIN, max_value=NIC_ID_MAX,
                                      help_text=_('Image server\'s NIC ID, which will be used to determine the IP '
                                                  'address for constructing the IMGAPI source set on all compute '
                                                  'nodes.'))
    VMS_IMAGE_SOURCES = s.ArrayField(label='VMS_IMAGE_SOURCES', required=False, max_items=16,
                                     help_text=_('List of additional IMGAPI sources that will be set on all '
                                                 'compute nodes.'))
    VMS_IMAGE_REPOSITORIES = s.URLDictField(label='VMS_IMAGE_REPOSITORIES', required=False, max_items=16,
                                            help_text=_('Object (key=name, value=URL) with remote disk image '
                                                        'repositories available in every virtual datacenter.'))

    # DNS defaults
    # NOTE(review): choices=Domain.TYPE_MASTER - verify this is a choices tuple and not a single constant.
    DNS_DOMAIN_TYPE_DEFAULT = s.ChoiceField(label='DNS_DOMAIN_TYPE_DEFAULT', choices=Domain.TYPE_MASTER,
                                            help_text='Default PowerDNS replication type of newly created domain.')
    DNS_HOSTMASTER = s.EmailField(label='DNS_HOSTMASTER', max_length=255,
                                  help_text=_('Default hostmaster email address used for SOA records '
                                              'of newly created domains.'))
    DNS_NAMESERVERS = s.ArrayField(label='DNS_NAMESERVERS', max_items=8,
                                   help_text=_('List of DNS servers used for NS records of newly created domains.'
                                               ' Set to an empty list to disable automatic creation of NS records.'))
    DNS_SOA_DEFAULT = s.CharField(label='DNS_SOA_DEFAULT', max_length=255, min_length=0, required=False,
                                  help_text=_('Default value for the SOA record of newly created domains. '
                                              'Available placeholders are: '
                                              '{nameserver} (replaced by first nameserver in DNS_NAMESERVERS) and '
                                              '{hostmaster} (replaced by DNS_HOSTMASTER). '
                                              'Set to an empty value to disable automatic creation of SOA records.'))

    # Outgoing email (SMTP)
    EMAIL_HOST = s.SafeCharField(label='EMAIL_HOST',
                                 help_text=_('Hostname or IP address of the SMTP server used for all outgoing emails.'))
    EMAIL_PORT = s.IntegerField(label='EMAIL_PORT', min_value=1, max_value=65535,
                                help_text=_('Port of the SMTP server.'))
    EMAIL_USE_TLS = s.BooleanField(label='EMAIL_USE_TLS',
                                   help_text=_('Whether to use an explicit TLS (secure) SMTP connection (STARTTLS).'))
    EMAIL_USE_SSL = s.BooleanField(label='EMAIL_USE_SSL',
                                   help_text=_('Whether to use an implicit TLS (secure) SMTP connection.'))
    EMAIL_HOST_USER = s.CharField(label='EMAIL_HOST_USER', max_length=255, required=False,
                                  help_text=_('Username for SMTP authentication.'))
    EMAIL_HOST_PASSWORD = s.CharField(label='EMAIL_HOST_PASSWORD', max_length=255, required=False,
                                      help_text=_('Password for SMTP authentication.'))
    SHADOW_EMAIL = s.EmailField(label='SHADOW_EMAIL', required=False,
                                help_text=_('Email address to which hidden copies of all outgoing emails are sent.'))

    # User profile defaults
    PROFILE_SSH_KEY_LIMIT = s.IntegerField(label='PROFILE_SSH_KEY_LIMIT', max_value=64,
                                           help_text=_('Maximum number of public SSH keys '
                                                       'that can be stored in one user profile.'))
    PROFILE_COUNTRY_CODE_DEFAULT = s.ChoiceField(label='PROFILE_COUNTRY_CODE_DEFAULT', choices=UserProfile.COUNTRIES,
                                                 help_text=_("Default country in user's profile."))
    PROFILE_PHONE_PREFIX_DEFAULT = s.ChoiceField(label='PROFILE_PHONE_PREFIX_DEFAULT',
                                                 choices=UserProfile.PHONE_PREFIXES,
                                                 help_text=_("Default country phone prefix in user's profile."))
    PROFILE_TIME_ZONE_DEFAULT = s.ChoiceField(label='PROFILE_TIME_ZONE_DEFAULT', choices=UserProfile.TIMEZONES,
                                              help_text=_("Default time zone in user's profile."))

    # Compute node monitoring (Zabbix)
    MON_ZABBIX_NODE_SYNC = s.BooleanField(label='MON_ZABBIX_NODE_SYNC',
                                          help_text=_('Whether compute nodes should be automatically '
                                                      'synchronized with the monitoring server.'))
    MON_ZABBIX_NODE_SLA = s.BooleanField(label='MON_ZABBIX_NODE_SLA',
                                         help_text=_('Whether to fetch and display '
                                                     'the SLA value of compute nodes.'))
    MON_ZABBIX_HOSTGROUP_NODE = s.SafeCharField(label='MON_ZABBIX_HOSTGROUP_NODE', max_length=255,
                                                help_text=_('Existing Zabbix host group, which will be used for all '
                                                            'monitored compute nodes.'))
    MON_ZABBIX_HOSTGROUPS_NODE = s.ArrayField(label='MON_ZABBIX_HOSTGROUPS_NODE', max_items=32, required=False,
                                              help_text=_('List of other Zabbix host groups, which will be '
                                                          'used for all monitored compute nodes.'))
    MON_ZABBIX_TEMPLATES_NODE = s.ArrayField(label='MON_ZABBIX_TEMPLATES_NODE', max_items=128, required=False,
                                             help_text=_('List of existing Zabbix templates, which will be used for all'
                                                         ' monitored compute nodes.'))

    # SMS provider
    SMS_PREFERRED_SERVICE = s.ChoiceField(label='SMS_PREFERRED_SERVICE', choices=get_services(),
                                          help_text=_('Currently used SMS provider.'))
    SMS_SERVICE_USERNAME = s.CharField(label='SMS_SERVICE_USERNAME', max_length=255, required=False,
                                       help_text=_('Username required for the selected SMS provider.'))
    SMS_SERVICE_PASSWORD = s.CharField(label='SMS_SERVICE_PASSWORD', max_length=255, required=False,
                                       help_text=_('Password required for the selected SMS provider.'))
    SMS_FROM_NUMBER = s.SafeCharField(label='SMS_FROM_NUMBER', max_length=64, required=False,
                                      help_text=_('Phone number used as sender for outgoing text messages.'))
    SMS_REGISTRATION_ENABLED = s.BooleanField(label='SMS_REGISTRATION_ENABLED',
                                              help_text=_("Whether to verify user's phone number during registration "
                                                          "and phone number change"))

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_VMS_NODE_SSH_KEYS_DEFAULT(self, attrs, source):
        """Every entry must be a valid public SSH key."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            for key in value:
                validate_ssh_key(key)

        return attrs

    # noinspection PyPep8Naming
    def validate_VMS_IMAGE_VM(self, attrs, source):
        """The image server must exist, be running or stopped, and be a SunOS zone;
        the value is normalized to the VM's uuid."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if value:
                try:
                    vm = get_vm(self.request, value, exists_ok=True, noexists_fail=True, dc_bound=False)
                except ObjectNotFound as exc:
                    raise s.ValidationError(exc.detail)
                else:
                    if vm.status not in (Vm.RUNNING, Vm.STOPPED):
                        raise s.ValidationError(_('Invalid VM status; VM must be running or stopped.'))

                    if vm.ostype != Vm.SUNOS_ZONE:
                        raise s.ValidationError(_('Invalid OS type; VM must be a SunOS Zone.'))

                    attrs[source] = vm.uuid

        return attrs

    # noinspection PyMethodMayBeStatic,PyPep8Naming
    def validate_DNS_SOA_DEFAULT(self, attrs, source):
        """After substituting the placeholders, the value must look like a valid SOA record."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if not value:
            attrs[source] = ''
            return attrs

        # Substitute placeholders with sample values so the regex below can be applied.
        testvalue = placeholder_validator(value, nameserver='ns01.example.com', hostmaster='hostmaster.example.com')
        # {nameserver} {hostmaster} 2013010100 28800 7200 604800 86400
        RegexValidator(r'^([A-Za-z0-9\._/-]+)\s+([A-Za-z0-9\._-]+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)$')(testvalue)

        return attrs

    def validate(self, attrs):
        """Cross-field checks: TLS/SSL exclusivity and SMS module dependency."""
        # Explicit (STARTTLS) and implicit TLS are mutually exclusive.
        if attrs.get('EMAIL_USE_TLS', None) and attrs.get('EMAIL_USE_SSL', None):
            self._errors['EMAIL_USE_TLS'] = self._errors['EMAIL_USE_SSL'] = s.ErrorList([
                _('Cannot enable EMAIL_USE_TLS and EMAIL_USE_SSL together.')
            ])

        # Do not allow SMS registration without the SMS module
        if (attrs.get('SMS_REGISTRATION_ENABLED', None) and
                not attrs.get('SMS_ENABLED', self.request.dc.settings.SMS_ENABLED)):
            self._errors['SMS_REGISTRATION_ENABLED'] = s.ErrorList([_('SMS support must be enabled first.')])

        return super(DefaultDcSettingsSerializer, self).validate(attrs)
class NetworkSerializer(s.InstanceSerializer):
    """
    vms.models.Subnet
    """
    _model_ = Subnet
    _update_fields_ = ('alias', 'owner', 'access', 'desc', 'network', 'netmask', 'gateway', 'resolvers',
                       'dns_domain', 'ptr_domain', 'nic_tag', 'vlan_id', 'dc_bound', 'dhcp_passthrough')
    _default_fields_ = ('name', 'alias', 'owner')
    _blank_fields_ = frozenset({'desc', 'dns_domain', 'ptr_domain'})
    _null_fields_ = frozenset({'gateway'})

    # min_length because of API URL: /network/ip/
    name = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', min_length=3, max_length=32)
    uuid = s.CharField(read_only=True)
    alias = s.SafeCharField(max_length=32)
    owner = s.SlugRelatedField(slug_field='username', queryset=User.objects, required=False)
    access = s.IntegerChoiceField(choices=Subnet.ACCESS, default=Subnet.PRIVATE)
    desc = s.SafeCharField(max_length=128, required=False)
    network = s.IPAddressField()
    netmask = s.IPAddressField()
    gateway = s.IPAddressField(required=False)  # can be null
    nic_tag = s.ChoiceField()  # Choices are filled in __init__ from VMS_NET_NIC_TAGS
    # NOTE(review): valid VLAN IDs are 0-4095; max_value=4096 looks one too high - confirm.
    vlan_id = s.IntegerField(min_value=0, max_value=4096)
    resolvers = s.IPAddressArrayField(source='resolvers_api', required=False, max_items=8)
    dns_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    ptr_domain = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$', max_length=250, required=False)  # can be blank
    dhcp_passthrough = s.BooleanField(default=False)
    dc_bound = s.BooleanField(source='dc_bound_bool', default=True)
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, net, *args, **kwargs):
        super(NetworkSerializer, self).__init__(request, net, *args, **kwargs)

        if not kwargs.get('many', False):
            self._dc_bound = net.dc_bound
            self.fields['owner'].queryset = get_owners(request, all=True)
            self.fields['nic_tag'].choices = [(i, i) for i in DefaultDc().settings.VMS_NET_NIC_TAGS]

    def _normalize(self, attr, value):
        # dc_bound is normalized to the Dc object stored by validate_dc_bound(),
        # not to the incoming boolean value.
        if attr == 'dc_bound':
            return self._dc_bound

        # noinspection PyProtectedMember
        return super(NetworkSerializer, self)._normalize(attr, value)

    def validate_dc_bound(self, attrs, source):
        """If the dc_bound flag changes, resolve and remember the target Dc."""
        try:
            value = bool(attrs[source])
        except KeyError:
            pass
        else:
            if value != self.object.dc_bound_bool:
                # Module-level validate_dc_bound() helper (not recursion).
                self._dc_bound = validate_dc_bound(self.request, self.object, value, _('Network'))

        return attrs

    def validate_alias(self, attrs, source):
        """Delegate alias uniqueness checking to the module-level validate_alias() helper."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            validate_alias(self.object, value)

        return attrs

    def validate_vlan_id(self, attrs, source):
        """IP addresses in this network must not collide with other networks on the same VLAN."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            net = self.object

            if not net.new:
                # TODO: Cannot use ip__in=net_ips (ProgrammingError)
                net_ips = set(net.ipaddress_set.all().values_list('ip', flat=True))
                other_ips = set(IPAddress.objects.exclude(subnet=net).filter(subnet__vlan_id=int(value))
                                                 .values_list('ip', flat=True))

                if net_ips.intersection(other_ips):
                    raise s.ValidationError(_('Network has IP addresses that already exist in another '
                                              'network with the same VLAN ID.'))

        return attrs

    # noinspection PyMethodMayBeStatic
    def validate_ptr_domain(self, attrs, source):
        """PTR domain must be a reverse (in-addr.arpa) zone and, if DNS is enabled, must exist."""
        try:
            value = attrs[source]
        except KeyError:
            pass
        else:
            if value:
                if not value.endswith('in-addr.arpa'):
                    raise s.ValidationError(_('Invalid PTR domain name.'))

                if settings.DNS_ENABLED:
                    if not Domain.objects.filter(name=value).exists():
                        raise s.ObjectDoesNotExist(value)

        return attrs

    def validate(self, attrs):
        """Cross-field validation: network/netmask sanity, per-DC network limit (POST)
        and VLAN ID restrictions."""
        try:
            network = attrs['network']
        except KeyError:
            network = self.object.network

        try:
            netmask = attrs['netmask']
        except KeyError:
            netmask = self.object.netmask

        try:
            ip_network = Subnet.get_ip_network(network, netmask)
            # Reserved networks are rejected the same way as unparsable input.
            if ip_network.is_reserved:
                raise ValueError
        except ValueError:
            self._errors['network'] = self._errors['netmask'] = \
                s.ErrorList([_('Enter a valid IPv4 network and netmask.')])

        # Enforce the per-datacenter network limit only when creating a dc-bound network.
        if self.request.method == 'POST' and self._dc_bound:
            limit = self._dc_bound.settings.VMS_NET_LIMIT

            if limit is not None:
                if Subnet.objects.filter(dc_bound=self._dc_bound).count() >= int(limit):
                    raise s.ValidationError(_('Maximum number of networks reached'))

        if self._dc_bound:
            try:
                vlan_id = attrs['vlan_id']
            except KeyError:
                vlan_id = self.object.vlan_id

            dc_settings = self._dc_bound.settings

            if dc_settings.VMS_NET_VLAN_RESTRICT and vlan_id not in dc_settings.VMS_NET_VLAN_ALLOWED:
                self._errors['vlan_id'] = s.ErrorList([_('VLAN ID is not available in datacenter.')])

        return attrs

    # noinspection PyMethodMayBeStatic
    def update_errors(self, fields, err_msg):
        """Build an errors dict assigning the same error message to every given field."""
        errors = {}

        for i in fields:
            errors[i] = s.ErrorList([err_msg])

        return errors