Example #1
class VmStatusSerializer(s.Serializer):
    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    alias = s.CharField(read_only=True)
    status = s.DisplayChoiceField(choices=Vm.STATUS, read_only=True)
    status_change = s.DateTimeField(read_only=True)
    node_status = s.DisplayChoiceField(source='node.status', choices=Node.STATUS_DB, read_only=True)
    tasks = s.CharField(source='tasks', read_only=True)
    uptime = s.IntegerField(source='uptime_actual', read_only=True)
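
A minimal usage sketch (hedged: it assumes the stock DRF-style constructor, and the hostname is illustrative, not taken from the example):

vm = Vm.objects.get(hostname='web01')  # illustrative lookup
data = VmStatusSerializer(vm).data
# DisplayChoiceField renders the stored integer as its display label, so
# data['status'] is e.g. 'running' rather than the raw status code, and
# data['uptime'] is read from the uptime_actual attribute via source=.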
Example #2
class VmSerializer(VmBaseSerializer):
    """
    VM details (read-only)
    """
    hostname = s.Field()
    uuid = s.CharField(read_only=True)
    alias = s.Field()
    node = s.SlugRelatedField(slug_field='hostname',
                              read_only=True,
                              required=False)
    owner = s.SlugRelatedField(slug_field='username', read_only=True)
    status = s.DisplayChoiceField(choices=Vm.STATUS, read_only=True)
    node_status = s.DisplayChoiceField(source='node.status',
                                       choices=Node.STATUS_DB,
                                       read_only=True)
    vcpus = s.IntegerField(read_only=True)
    ram = s.IntegerField(read_only=True)
    disk = s.IntegerField(read_only=True)
    ips = s.ArrayField(read_only=True)
    uptime = s.IntegerField(source='uptime_actual', read_only=True)
    locked = s.BooleanField(read_only=True)
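
A hedged sketch of list serialization (assuming the usual DRF many=True semantics; the handler and get_vms() are illustrative, not part of the example):

def vm_list(request):  # hypothetical listing handler
    vms = get_vms(request)  # hypothetical queryset helper
    # SlugRelatedField flattens each relation to a single slug value, so every
    # item renders node as its hostname and owner as its username.
    return VmSerializer(vms, many=True).data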
Example #3
class NodeSerializer(s.Serializer):
    """
    Node details serializer (read-only).
    """
    hostname = s.Field()
    address = s.Field()
    status = s.IntegerChoiceField(choices=Node.STATUS_DB, read_only=True)
    node_status = s.DisplayChoiceField(source='status',
                                       choices=Node.STATUS_DB,
                                       read_only=True)
    owner = s.SlugRelatedField(slug_field='username', read_only=True)
    is_head = s.BooleanField(read_only=True)
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
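
Note that the same status column is exposed twice: status as the raw integer (IntegerChoiceField) and node_status as its human-readable label (DisplayChoiceField with source='status'). A sketch with illustrative values:

data = NodeSerializer(node).data
# data['status']      -> e.g. 1         (raw value as stored in the DB)
# data['node_status'] -> e.g. 'online'  (display label from Node.STATUS_DB)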
Example #4
class NodeDefineSerializer(s.InstanceSerializer):
    """
    vms.models.Node
    """
    error_negative_resources = s.ErrorList(
        [_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'is_compute', 'is_backup',
                       'cpu_coef', 'ram_coef', 'monitoring_hostgroups',
                       'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.CharField(read_only=True)
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status',
                                       choices=Node.STATUS_DB,
                                       read_only=True)
    owner = s.SlugRelatedField(slug_field='username',
                               queryset=User.objects,
                               read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0,
                              max_value=1,
                              max_digits=4,
                              decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(
        source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(max_items=16, default=[])
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args,
                                                   **kwargs)
        self.clear_cache = False
        self.status_changed = False
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))

        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(
                    _('Cannot change status. Please add a valid license first.'))

            # Manual switch from the unreachable or offline state
            if node.is_unreachable() or node.is_offline():
                if settings.DEBUG:
                    logger.warning(
                        'DEBUG mode on => skipping status checking of node %s',
                        self.object)
                elif not node_ping(self.object, all_workers=False):
                    # node_ping() requires that the node is really online
                    raise s.ValidationError(
                        _('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            self.status_changed = value

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))
            self.clear_cache = True

        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(
                    _('Found existing VM backup definitions.'))
            self.clear_cache = True

        # Check existing backups when removing node
        if self.request.method == 'DELETE':
            if self.object.backup_set.exists():
                raise s.ValidationError(_('Found existing VM backups.'))
            self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True

        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True

        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        return not (self.object.cpu_coef == self._cpu_coef
                    and self.object.ram_coef == self._ram_coef)
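
The validate_<field>() hooks above follow the old DRF 2.x convention: each receives the full attrs dict plus the source key and returns attrs. A hedged sketch of a handler driving this serializer (error_response() is a hypothetical helper; partial=True assumes stock DRF behavior):

def node_define_put(request, node):  # hypothetical handler
    ser = NodeDefineSerializer(request, node, data=request.data, partial=True)

    if not ser.is_valid():
        return error_response(ser.errors)  # hypothetical helper

    # Node.save() accepts these kwargs, as shown in Example #6 below
    ser.object.save(update_resources=ser.update_node_resources,
                    clear_cache=ser.clear_cache)

    if ser.status_changed:
        pass  # emit the status-change signal mentioned in validate_status()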
Example #5
class VmReplicaSerializer(s.InstanceSerializer):
    _model_ = SlaveVm
    _default_fields_ = ('repname', )
    _update_fields_ = ('reserve_resources', 'sleep_time', 'enabled', 'bwlimit')

    hostname = s.CharField(source='master_vm.hostname', read_only=True)
    repname = s.RegexField(r'^[A-Za-z0-9][A-Za-z0-9\._-]*$',
                           source='name',
                           max_length=24,
                           min_length=1)
    node = s.SlugRelatedField(slug_field='hostname',
                              queryset=Node.objects,
                              required=True)  # Updated only by POST
    root_zpool = s.CharField(max_length=64,
                             required=False)  # Updated only by POST
    disk_zpools = DiskPoolDictField(required=False)  # Updated only by POST
    reserve_resources = s.BooleanField(
        default=True)  # Default value changed below during POST
    sleep_time = s.IntegerField(source='rep_sleep_time',
                                min_value=0,
                                max_value=86400,
                                default=60)
    enabled = s.BooleanField(source='rep_enabled', default=True)
    bwlimit = s.IntegerField(source='rep_bwlimit',
                             required=False,
                             min_value=0,
                             max_value=2147483647)
    last_sync = s.DateTimeField(read_only=True, required=False)
    reinit_required = s.BooleanField(source='rep_reinit_required',
                                     read_only=True,
                                     required=False)
    node_status = s.DisplayChoiceField(source='vm.node.status',
                                       choices=Node.STATUS_DB,
                                       read_only=True)
    created = s.DateTimeField(source="vm.created",
                              read_only=True,
                              required=False)

    def __init__(self, request, slave_vm, *args, **kwargs):
        self.img_required = None
        self.reserve_resources_changed = False
        self._detail_dict = {}

        super(VmReplicaSerializer, self).__init__(request, slave_vm, *args,
                                                  **kwargs)

        if request.method == 'POST':
            vm = slave_vm.vm
            dc_settings = request.dc.settings
            self.fields['reserve_resources'].default = dc_settings.VMS_VM_REPLICA_RESERVATION_DEFAULT
            self.fields['node'].queryset = get_nodes(request, is_compute=True)
            self._disks = vm.json_get_disks()

            if vm.is_hvm():
                self.fields['disk_zpools'].max_items = len(self._disks)
            else:
                del self.fields['disk_zpools']
        else:
            self.fields['node'].required = False
            self.fields['node'].read_only = True
            self.fields['root_zpool'].read_only = True
            self.fields['disk_zpools'].read_only = True

    def validate_disk_zpools(self, attrs, source):
        """Basic disk_zpools validation (POST only)"""
        disk_zpools = attrs.get(source, None)

        if disk_zpools:
            if max(disk_zpools.keys()) > len(self._disks):
                raise s.ValidationError(_('Invalid disk_id.'))

        return attrs

    def validate_node(self, attrs, source):
        """Basic node validation (POST only)"""
        try:
            node = attrs[source]
        except KeyError:
            return attrs

        if node == self.object.node:
            raise s.ValidationError(
                _('Target node is the same as current node.'))

        if node.status != Node.ONLINE:
            raise s.ValidationError(_('Target node is not in online state.'))

        # Check nic tags
        try:
            validate_nic_tags(self.object.vm, new_node=node)
        except s.ValidationError:
            raise s.ValidationError(
                _('Some networks are not available on target node.'))

        return attrs

    def _validate_create(self, attrs):
        """Validate node storage zpools, resources, ... and create slave VM (POST only)"""
        node = attrs['node']
        self._detail_dict['node'] = node.hostname
        slave_vm = self.object
        slave_vm.set_rep_hostname()
        slave_vm.node = node
        slave_vm.reserve_resources = attrs.get('reserve_resources', True)
        slave_vm_define = SlaveVmDefine(slave_vm)

        # Validate root_zpool (we can do this after we know the new node)
        root_zpool = attrs.get('root_zpool', None)
        try:
            root_zpool = slave_vm_define.save_root_zpool(root_zpool)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False
        else:
            if root_zpool:
                self._detail_dict['root_zpool'] = root_zpool

        # Validate disk_zpools (we can do this after we know the new node)
        if slave_vm.vm.is_hvm():
            disk_zpools = attrs.get('disk_zpools', {})
            try:
                disk_zpools = slave_vm_define.save_disk_zpools(disk_zpools)
            except s.APIValidationError as exc:
                self._errors['node'] = exc.api_errors
                return False
            else:
                if disk_zpools:
                    self._detail_dict['disk_zpools'] = disk_zpools

        # Validate dc_node resources
        try:
            slave_vm_define.validate_node_resources(
                ignore_cpu_ram=not slave_vm.reserve_resources)
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate storage resources
        try:
            slave_vm_define.validate_storage_resources()
        except s.APIValidationError as exc:
            self._errors['node'] = exc.api_errors
            return False

        # Validate images
        self.img_required = slave_vm_define.check_required_images()

        # noinspection PyAttributeOutsideInit
        self.slave_vm_define = slave_vm_define

        return True

    def _validate_update(self, attrs):
        """Validate node resources if reserve_resources changed to True"""
        try:
            reserve_resource = attrs['reserve_resources']
        except KeyError:
            pass
        else:
            # We need to know whether the user requested a change of the reserve_resources attribute
            self.reserve_resources_changed = reserve_resource != self.object.reserve_resources

            if self.reserve_resources_changed and reserve_resource:
                slave_vm_define = SlaveVmDefine(self.object)

                try:
                    slave_vm_define.validate_node_resources(
                        ignore_cpu_ram=False, ignore_disk=True)
                except s.APIValidationError as exc:
                    self._errors['node'] = exc.api_errors
                    return False

        return True

    def validate(self, attrs):
        if self.object.rep_reinit_required:
            raise s.ValidationError(
                _('Server replica requires re-initialization.'))

        if self.request.method == 'POST':
            total = SlaveVm.objects.filter(
                master_vm=self.object.master_vm).exclude(name=u'').count()
            self.object.rep_id = total + 1
            limit = self.request.dc.settings.VMS_VM_REPLICA_LIMIT

            if limit is not None and int(limit) <= total:
                raise s.ValidationError(
                    _('Maximum number of server replicas reached.'))

            self._validate_create(attrs)
        else:  # PUT
            self._validate_update(attrs)

        return attrs

    def save_slave_vm(self):
        """Initial saving of slave VM - used only by POST vm_replica"""
        # The only difference between a slave and a master VM should be the hostname,
        # hence we temporarily change the slave hostname to the real hostname for the purpose of sync_json()
        slave_vm = self.object
        hostname = slave_vm.vm.hostname
        slave_vm.vm.hostname = slave_vm.master_vm.hostname
        slave_vm.vm.choose_vnc_port()
        slave_vm.vm.sync_json()
        slave_vm.vm.hostname = hostname

        # We also don't want to save the replication state (which can only be updated by vm_replica_cb)
        sync_status = slave_vm.sync_status
        slave_vm.sync_status = SlaveVm.DIS
        self.slave_vm_define.save()
        slave_vm.sync_status = sync_status

        return self.slave_vm_define.slave_vm

    def node_image_import(self):
        if self.img_required:
            ns, img = self.img_required
            return NodeImageView.import_for_vm(self.request, ns, img,
                                               self.object)
        return None

    def detail_dict(self, **kwargs):
        # noinspection PyStatementEffect
        self.data  # accessing .data triggers serialization as a side effect
        dd = super(VmReplicaSerializer, self).detail_dict(**kwargs)
        dd.update(self._detail_dict)
        dd['repname'] = self.object.name

        return dd
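
A hedged sketch of the POST path tying the pieces together: is_valid() runs validate() and thus _validate_create(), then save_slave_vm() persists the definition and node_image_import() imports a missing image if one is required (handler and helper names are assumptions):

def vm_replica_post(request, slave_vm):  # hypothetical handler
    ser = VmReplicaSerializer(request, slave_vm, data=request.data)

    if not ser.is_valid():
        return error_response(ser.errors)  # hypothetical helper

    new_slave_vm = ser.save_slave_vm()  # initial save of the slave VM
    img_task = ser.node_image_import()  # None if no image import is needed

    if img_task is None:
        pass  # no image import required -- the replication task can start directly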
Example #6
class NodeDefineSerializer(s.InstanceSerializer):
    """
    vms.models.Node
    """
    error_negative_resources = s.ErrorList(
        [_('Value is too low because of existing virtual machines.')])

    _model_ = Node
    _update_fields_ = ('status', 'owner', 'address', 'is_compute', 'is_backup',
                       'note', 'cpu_coef', 'ram_coef', 'monitoring_hostgroups',
                       'monitoring_templates')

    hostname = s.CharField(read_only=True)
    uuid = s.CharField(read_only=True)
    address = s.ChoiceField()
    status = s.IntegerChoiceField(choices=Node.STATUS_DB)
    node_status = s.DisplayChoiceField(source='status',
                                       choices=Node.STATUS_DB,
                                       read_only=True)
    owner = s.SlugRelatedField(slug_field='username',
                               queryset=User.objects,
                               read_only=False)
    is_head = s.BooleanField(read_only=True)
    is_compute = s.BooleanField()
    is_backup = s.BooleanField()
    note = s.CharField(required=False)
    cpu = s.IntegerField(source='cpu_total', read_only=True)
    ram = s.IntegerField(source='ram_total', read_only=True)
    cpu_coef = s.DecimalField(min_value=0, max_digits=4, decimal_places=2)
    ram_coef = s.DecimalField(min_value=0,
                              max_value=1,
                              max_digits=4,
                              decimal_places=2)
    cpu_free = s.IntegerField(read_only=True)
    ram_free = s.IntegerField(read_only=True)
    ram_kvm_overhead = s.IntegerField(read_only=True)
    sysinfo = s.Field(
        source='api_sysinfo')  # Field is read_only=True by default
    monitoring_hostgroups = s.ArrayField(
        max_items=16,
        default=[],
        validators=(RegexValidator(
            regex=MonitoringBackend.RE_MONITORING_HOSTGROUPS), ))
    monitoring_templates = s.ArrayField(max_items=32, default=[])
    created = s.DateTimeField(read_only=True, required=False)

    def __init__(self, request, instance, *args, **kwargs):
        super(NodeDefineSerializer, self).__init__(request, instance, *args,
                                                   **kwargs)
        self.clear_cache = False
        self.status_changed = False
        self.address_changed = False
        self.old_ip_address = None
        self.monitoring_changed = False

        if not kwargs.get('many', False):
            # Valid node IP addresses
            self.fields['address'].choices = [(ip, ip) for ip in instance.ips]
            # Used for update_node_resources()
            self._cpu_coef = instance.cpu_coef
            self._ram_coef = instance.ram_coef
            # Only active users
            self.fields['owner'].queryset = get_owners(request)

    def validate_owner(self, attrs, source):
        """Cannot change owner while pending tasks exist"""
        validate_owner(self.object, attrs.get(source, None), _('Compute node'))

        return attrs

    def validate_address(self, attrs, source):
        """Mark that node IP address is going to change"""
        new_address = attrs.get(source, None)

        if new_address and self.object.address != new_address:
            self.address_changed = True

            try:
                self.old_ip_address = self.object.ip_address
            except ObjectDoesNotExist:
                self.old_ip_address = None

        return attrs

    def validate_status(self, attrs, source):
        """Mark the status change -> used for triggering the signal.
        Do not allow a manual status change from unlicensed status."""
        try:
            value = attrs[source]
        except KeyError:
            return attrs

        if self.object.status != value:
            node = self.object

            if node.is_unlicensed():
                raise s.ValidationError(
                    _('Cannot change status. Please add a valid license first.'))

            # Manual switch from the unreachable or offline state
            if node.is_unreachable() or node.is_offline():
                if settings.DEBUG:
                    logger.warning(
                        'DEBUG mode on => skipping status checking of node %s',
                        self.object)
                elif not node_ping(self.object, all_workers=False):
                    # node_ping() requires that the node is really online
                    raise s.ValidationError(
                        _('Cannot change status. Compute node is down.'))

            self.clear_cache = True
            self.status_changed = value

        return attrs

    def validate_is_compute(self, attrs, source):
        """Search for defined VMs when turning compute capability off"""
        if source in attrs and self.object.is_compute != attrs[source]:
            if self.object.vm_set.exists():
                raise s.ValidationError(_('Found existing VMs on node.'))
            self.clear_cache = True

        return attrs

    def validate_is_backup(self, attrs, source):
        """Search for existing backup definitions, which are using this node"""
        if source in attrs and self.object.is_backup != attrs[source]:
            if self.object.backupdefine_set.exists():
                raise s.ValidationError(
                    _('Found existing VM backup definitions.'))
            self.clear_cache = True

        # Check existing backups when removing node
        if self.request.method == 'DELETE':
            if self.object.backup_set.exists():
                raise s.ValidationError(_('Found existing VM backups.'))
            self.clear_cache = True

        return attrs

    def validate_monitoring_hostgroups(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_hostgroups != attrs[source]:
            self.monitoring_changed = True

        return attrs

    def validate_monitoring_templates(self, attrs, source):
        """Mark the monitoring change -> used for triggering the signal"""
        if source in attrs and self.object.monitoring_templates != attrs[source]:
            self.monitoring_changed = True

        return attrs

    @property
    def update_node_resources(self):
        """True if cpu_coef or ram_coef changed"""
        return not (self.object.cpu_coef == self._cpu_coef
                    and self.object.ram_coef == self._ram_coef)

    def save(self):
        """Update compute node attributes in database"""
        node = self.object

        # NOTE:
        # Changing cpu or ram coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free
        try:
            with transaction.atomic():
                node.save(update_resources=self.update_node_resources,
                          clear_cache=self.clear_cache)

                if self.update_node_resources:
                    if node.cpu_free < 0 or node.dcnode_set.filter(
                            cpu_free__lt=0).exists():
                        raise IntegrityError('cpu_check')

                    if node.ram_free < 0 or node.dcnode_set.filter(
                            ram_free__lt=0).exists():
                        raise IntegrityError('ram_check')

        except IntegrityError as exc:
            errors = {}
            exc_error = str(exc)
            # ram or cpu constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
            # OR an exception was raised above
            if 'ram_check' in exc_error:
                errors['ram_coef'] = self.error_negative_resources
            if 'cpu_check' in exc_error:
                errors['cpu_coef'] = self.error_negative_resources

            if not errors:
                raise exc

            return errors

        if self.update_node_resources:  # cpu_free or ram_free changed
            self.reload()

        return None
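
Unlike a typical DRF save(), this save() reports constraint violations by returning an error dict (None on success) instead of raising. A minimal sketch of honoring that contract (handler and helper names are assumptions):

def node_define_update(request, node):  # hypothetical handler
    ser = NodeDefineSerializer(request, node, data=request.data)

    if not ser.is_valid():
        return error_response(ser.errors)  # hypothetical helper

    errors = ser.save()  # None on success; an error dict on a constraint violation

    if errors:
        return error_response(errors)
    return ok_response(ser.data)  # hypothetical helper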