Пример #1
0
    def vm_set_memory(self, vm, memory):
        """Change the memory allocation of a VM to ``memory`` MiB.

        Shrinking is only possible while the VM is powered off.  For a
        running VM the change is applied live (ballooning/hotplug) and we
        wait until the guest actually sees the new amount.

        :param vm: the VM object whose memory is changed
        :param memory: new memory size in MiB
        :raises InvalidStateError: when shrinking a running VM
        :raises HypervisorError: when the hypervisor lacks free memory or
            the change is not visible afterwards
        """
        self._check_committed(vm)
        vm.check_serveradmin_config()
        self._check_attribute_synced(vm, 'memory')

        running = self.vm_running(vm)

        # Live memory can only grow; shrinking requires a power cycle.
        if running and memory < vm.dataset_obj['memory']:
            raise InvalidStateError(
                'Cannot shrink memory while VM is running'
            )
        if self.free_vm_memory() < memory - vm.dataset_obj['memory']:
            raise HypervisorError('Not enough free memory on hypervisor.')

        log.info(
            'Changing memory of "{}" on "{}" from {} MiB to {} MiB'
            .format(vm.fqdn, self.fqdn, vm.dataset_obj['memory'], memory)
        )

        vm.dataset_obj['memory'] = memory
        vm.check_serveradmin_config()

        # If VM is offline, we can just rebuild the domain
        if not running:
            log.info('VM is offline, rebuilding domain with new settings')
            self.redefine_vm(vm)
            vm.dataset_obj.commit()
        else:
            old_total = vm.meminfo()['MemTotal']
            set_memory(self, vm, self._get_domain(vm))
            vm.dataset_obj.commit()

            # Hypervisor might take some time to propagate memory changes,
            # wait until MemTotal changes.
            retry_wait_backoff(
                lambda: vm.meminfo()['MemTotal'] != old_total,
                'New memory is not visible to virtual machine. Note that we '
                'can not online decrease the domains memory. The libvirt '
                'and serveradmin changes will therefore not be rolled back.',
                max_wait=40
            )

        # Validate changes, if possible.
        current_memory = self.vm_sync_from_hypervisor(vm).get('memory', memory)
        if current_memory != memory:
            # NOTE: fixed the original message, which was missing a space
            # between "has" and "failed".
            raise HypervisorError(
                'Warning: The sanity check to see if libvirt reports the '
                'updated amount of memory for the domain we just changed '
                'has failed. Note that we can not online decrease the '
                'domains memory. The libvirt and serveradmin changes will '
                'therefore not be rolled back.'
            )
Пример #2
0
    def consolidated_sg(self) -> SecurityGroup:
        """Return the consolidated security group for this host.

        The SG is looked up on AWS by a deterministic name derived from
        the member SG names and cached on the instance afterwards.
        """
        if self.__consolidated_sg:
            return self.__consolidated_sg

        # Member names must be sorted so the derived name is stable
        # across runs.
        member_names = typing.cast(
            typing.Tuple[str], tuple(sorted(self.all_sgs)))
        digest = sha256(','.join(member_names).encode()).hexdigest()
        csg_name = 'consolidated-' + digest

        candidates = self.ec2r.security_groups.filter(Filters=[
            {
                'Name': 'group-name',
                'Values': [csg_name],
            },
            {
                'Name': 'vpc-id',
                'Values': [self.aws_vpc.id],
            },
        ])
        # There should be only one, hopefully.
        matched_sg = next(iter(candidates), None)
        if matched_sg is None:
            raise HypervisorError(f'Consolidated SG "{csg_name}" has not been '
                                  'synchronized to AWS yet!')

        self.__consolidated_sg = matched_sg
        return self.__consolidated_sg
Пример #3
0
    def vm_set_num_cpu(self, vm, num_cpu):
        """Change the number of CPUs of a VM"""
        self._check_committed(vm)
        self._check_attribute_synced(vm, 'num_cpu')

        if num_cpu < 1:
            raise ConfigError('Invalid num_cpu value: {}'.format(num_cpu))

        log.info('Changing #CPUs of "{}" on "{}" from {} to {}...'.format(
            vm.fqdn, self.fqdn, vm.dataset_obj['num_cpu'], num_cpu))

        if self.vm_running(vm):
            # Live-adjust the VCPU count on the running domain.
            set_vcpus(self, vm, self._get_domain(vm), num_cpu)
        else:
            # If VM is offline, we can just rebuild the domain
            log.info('VM is offline, rebuilding domain with new settings')
            vm.dataset_obj['num_cpu'] = num_cpu
            self.redefine_vm(vm)

        # Validate changes
        # We can't rely on the hypervisor to provide data on VMs all the time.
        synced = self.vm_sync_from_hypervisor(vm)
        if synced['num_cpu'] != num_cpu:
            raise HypervisorError(
                'New CPUs are not visible to hypervisor, changes will not be '
                'committed.')

        vm.dataset_obj['num_cpu'] = num_cpu
        vm.dataset_obj.commit()
Пример #4
0
 def _get_domain(self, vm):
     domain = self._find_domain(vm)
     if not domain:
         raise HypervisorError(
             'Unable to find domain "{}" on hypervisor "{}".'.format(
                 vm.fqdn, self.fqdn))
     return domain
Пример #5
0
 def vm_mount_path(self, vm):
     """Returns the mount path for a VM or raises HypervisorError if not
     mounted."""
     mounted = self._mount_path
     if vm in mounted:
         return mounted[vm]
     raise HypervisorError('"{}" is not mounted on "{}".'.format(
         vm.fqdn, self.fqdn))
Пример #6
0
File: kvm.py Project: seqizz/igvm
def set_memory(hypervisor, vm, domain):
    """Changes the amount of memory of a VM.

    Tries memory ballooning first (when the domain supports it) and falls
    back to hotplugging memory DIMMs.  Raises HypervisorError when neither
    strategy is available or the hotplug preconditions are violated.
    """
    props = DomainProperties.from_running(hypervisor, vm, domain)

    if props.mem_balloon:
        log.info('Attempting to increase memory with ballooning')
        try:
            domain.setMemoryFlags(
                vm.dataset_obj['memory'] * 1024,
                VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG,
            )
            return
        except libvirtError:
            log.info(
                'Adding memory via ballooning failed, falling back to hotplug')

    if props.mem_hotplug:
        add_memory = vm.dataset_obj['memory'] - props.current_memory
        # Explicit checks instead of assert statements: asserts are
        # stripped under "python -O" and would let invalid hotplug
        # requests through silently.
        if add_memory <= 0:
            raise HypervisorError(
                'Memory hotplug can only increase memory, '
                'requested change is {} MiB.'.format(add_memory))
        if add_memory % (128 * props.num_nodes) != 0:
            raise HypervisorError(
                'Memory change of {} MiB is not a multiple of 128 MiB '
                'per NUMA node ({} nodes).'.format(
                    add_memory, props.num_nodes))
        _attach_memory_dimms(vm, domain, props, add_memory)
        # We used to set DIMMS online here but now we have udev rule for that.
        return

    raise HypervisorError(
        '"{}" does not support any known memory extension strategy. '
        'You will have to power off the machine and do it offline.'.format(
            vm.fqdn))
Пример #7
0
 def conn(self):
     """Return a libvirt connection to this hypervisor or raise."""
     connection = get_virtconn(self.fqdn)
     if connection:
         return connection
     raise HypervisorError(
         'Unable to connect to hypervisor "{}"!'
         .format(self.fqdn)
     )
Пример #8
0
File: kvm.py Project: seqizz/igvm
def set_vcpus(hypervisor, vm, domain, num_cpu):
    """Changes the number of active VCPUs.

    :param hypervisor: the hypervisor the domain runs on
    :param vm: the VM object being changed
    :param domain: the running libvirt domain
    :param num_cpu: new number of VCPUs
    :raises HypervisorError: when num_cpu exceeds the domain maximum or
        libvirt rejects the change
    """
    props = DomainProperties.from_running(hypervisor, vm, domain)
    if num_cpu > props.max_cpus:
        raise HypervisorError('VM can not receive more than {} VCPUs'.format(
            props.max_cpus))

    # Note: We could support the guest agent in here by first trying the
    #       VIR_DOMAIN_VCPU_GUEST flag. This would allow live shrinking.
    #       However, changes via the guest agent are not persisted in the
    #       config (another run with VIR_DOMAIN_AFFECT_CONFIG doesn't help),
    #       so the VM will be back to the old value after the next reboot.

    try:
        domain.setVcpusFlags(num_cpu,
                             VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)
    except libvirtError as e:
        # Chain the libvirt error so the underlying cause stays visible
        # in the traceback.
        raise HypervisorError('setVcpus failed: {}'.format(e)) from e

    # Properly pin all new VCPUs.  Pinning spans the hypervisor's CPUs,
    # hence hypervisor.dataset_obj['num_cpu'] rather than the VM's.
    _live_repin_cpus(domain, props, hypervisor.dataset_obj['num_cpu'])
Пример #9
0
    def get_storage_type(self):
        """Return the libvirt storage pool type of this hypervisor.

        The type is read from the storage pool XML and cached.  The cache
        is only populated AFTER validation: the original code cached the
        value first, so a second call on an unsupported pool silently
        returned the invalid type instead of raising again.

        :raises HypervisorError: when the storage type is not present in
            both HOST_RESERVED_MEMORY and RESERVED_DISK
        """
        if self._storage_type:
            return self._storage_type

        storage_type = ElementTree.fromstring(
            self.get_storage_pool().XMLDesc()).attrib['type']

        # A usable type must be known to both reservation tables.
        if (storage_type not in HOST_RESERVED_MEMORY
                or storage_type not in RESERVED_DISK):
            raise HypervisorError(
                'Unsupported storage type {} on hypervisor {}'.format(
                    storage_type, self.dataset_obj['hostname']))

        self._storage_type = storage_type
        return self._storage_type
Пример #10
0
    def undefine_vm(self, vm, keep_storage=False):
        """Remove the libvirt definition of a VM, optionally keeping storage.

        Refuses to act on a running VM.
        """
        if self.vm_running(vm):
            raise InvalidStateError(
                'Refusing to undefine running VM "{}"'.format(vm.fqdn))
        log.info('Undefining "{}" on "{}"'.format(vm.fqdn, self.fqdn))

        if not keep_storage:
            # XXX: get_volume_by_vm depends on domain names to find legacy
            # domains w/o an uid_name.  The order is therefore important.
            self.get_volume_by_vm(vm).delete()

        domain = self._get_domain(vm)
        if domain.undefine() != 0:
            raise HypervisorError('Unable to undefine "{}".'.format(vm.fqdn))
Пример #11
0
    def vm_sync_from_hypervisor(self, vm):
        """Synchronizes serveradmin information from the actual data on
        the hypervisor. Returns a dict with all collected values."""
        result = {}
        # Update disk size.  Keep the try body minimal and chain the
        # original error so the root cause is not lost.
        try:
            vol_size = self.get_volume_by_vm(vm).info()[1]
        except HypervisorError as err:
            raise HypervisorError(
                'Unable to find source LV and determine its size.') from err
        # Volume size is reported in bytes; round up to full GiB.
        result['disk_size_gib'] = int(math.ceil(vol_size / 1024**3))

        self._vm_sync_from_hypervisor(vm, result)
        return result
Пример #12
0
    def _find_domain(self, vm):
        """Search and return the domain on hypervisor

        It is erroring out when multiple domains found, and returning None,
        when none found.
        """
        # We are not using lookupByName(), because it prints ugly messages to
        # the console.  Match domains by the object_id encoded in the name.
        matches = [
            domain for domain in self.conn().listAllDomains()
            if vm.match_uid_name(domain.name())
        ]
        if len(matches) > 1:
            raise HypervisorError(
                'Same VM is defined multiple times as "{}" and "{}".'.
                format(matches[0].name(), matches[1].name()))
        return matches[0] if matches else None
Пример #13
0
File: kvm.py Project: seqizz/igvm
def _set_cpu_model(hypervisor, vm, tree):
    """
    Selects CPU model based on hardware model.
    """
    hw_model = hypervisor.dataset_obj['hardware_model']

    for arch, models in KVM_HWMODEL_TO_CPUMODEL.items():
        if hw_model not in models:
            continue
        # Found the CPU architecture for this hardware model; write it
        # into the domain XML.
        cpu = _find_or_create(tree, 'cpu')
        cpu.attrib['match'] = 'exact'
        cpu.attrib['mode'] = 'custom'
        model = _find_or_create(cpu, 'model')
        model.attrib['fallback'] = 'allow'
        model.text = arch
        log.info('KVM: CPU model set to "%s"' % arch)
        return
    raise HypervisorError(
        'No CPU configuration for hardware model "{}"'.format(hw_model))
Пример #14
0
 def stop_vm_force(self, vm):
     """Hard power off the VM's domain (no guest shutdown)."""
     log.info('Force-stopping "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
     result = self._get_domain(vm).destroy()
     if result != 0:
         raise HypervisorError('Unable to force-stop "{}".'.format(vm.fqdn))
Пример #15
0
 def stop_vm(self, vm):
     """Ask the VM's domain to shut down gracefully."""
     log.info('Shutting down "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
     result = self._get_domain(vm).shutdown()
     if result != 0:
         raise HypervisorError('Unable to stop "{}".'.format(vm.fqdn))
Пример #16
0
    def check_vm(self, vm, offline):
        """Check whether a VM can run on this hypervisor"""
        # Cheap checks should always be executed first to save time
        # and fail early. Same goes for checks that are more likely to fail.

        # Immediately check whether HV is even supported.
        if not offline:
            # Compatible OS?
            os_pair = (vm.hypervisor.dataset_obj['os'], self.dataset_obj['os'])
            if os_pair not in MIGRATE_CONFIG:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *os_pair))

            # Compatible CPU model?  Resolve each hardware model to its
            # CPU architecture individually; the original flat list
            # comprehension raised a bare IndexError when a model was
            # missing from KVM_HWMODEL_TO_CPUMODEL.
            hw_pair = (
                vm.hypervisor.dataset_obj['hardware_model'],
                self.dataset_obj['hardware_model'],
            )
            cpu_pair = [
                next((
                    arch for arch, models in KVM_HWMODEL_TO_CPUMODEL.items()
                    if model in models), None)
                for model in hw_pair
            ]
            if None in cpu_pair or cpu_pair[0] != cpu_pair[1]:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *hw_pair))

        # HV in supported state?
        if self.dataset_obj['state'] not in ['online', 'online_reserved']:
            raise InvalidStateError(
                'Hypervisor "{}" is not in online state ({}).'.format(
                    self.fqdn, self.dataset_obj['state']))

        # Enough CPUs?
        if vm.dataset_obj['num_cpu'] > self.dataset_obj['num_cpu']:
            raise HypervisorError(
                'Not enough CPUs. Destination Hypervisor has {0}, '
                'but VM requires {1}.'.format(self.dataset_obj['num_cpu'],
                                              vm.dataset_obj['num_cpu']))

        # Proper VLAN?
        if not self.get_vlan_network(vm.dataset_obj['intern_ip']):
            raise HypervisorError(
                'Hypervisor "{}" does not support route_network "{}".'.format(
                    self.fqdn, vm.route_network))

        # Those checks below all require libvirt connection,
        # so execute them last to avoid unnecessary overhead if possible.

        # Enough memory?
        free_mib = self.free_vm_memory()
        if vm.dataset_obj['memory'] > free_mib:
            raise HypervisorError(
                'Not enough memory. '
                'Destination Hypervisor has {:.2f} MiB but VM requires {} MiB '
                .format(free_mib, vm.dataset_obj['memory']))

        # Enough disk?
        free_disk_space = self.get_free_disk_size_gib()
        vm_disk_size = float(vm.dataset_obj['disk_size_gib'])
        if vm_disk_size > free_disk_space:
            raise HypervisorError(
                'Not enough free space in VG {} to build VM while keeping'
                ' {} GiB reserved'.format(
                    VG_NAME, RESERVED_DISK[self.get_storage_type()]))

        # VM already defined? Least likely, if at all.
        if self.vm_defined(vm):
            raise HypervisorError('VM "{}" is already defined on "{}".'.format(
                vm.fqdn, self.fqdn))
Пример #17
0
 def start_vm(self, vm):
     """Start the VM's domain on this hypervisor."""
     log.info('Starting "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
     result = self._get_domain(vm).create()
     if result != 0:
         raise HypervisorError('"{0}" failed to start'.format(vm.fqdn))
Пример #18
0
    def check_vm(self, vm, offline):
        """Check whether a VM can run on this hypervisor"""
        # Hypervisor must be in a usable state before anything else.
        if self.dataset_obj['state'] not in ['online', 'online_reserved']:
            raise InvalidStateError(
                'Hypervisor "{}" is not in online state ({}).'.format(
                    self.fqdn, self.dataset_obj['state']))

        # A domain for this VM must not exist here yet.
        if self.vm_defined(vm):
            raise HypervisorError('VM "{}" is already defined on "{}".'.format(
                vm.fqdn, self.fqdn))

        # Enough CPUs?
        if vm.dataset_obj['num_cpu'] > self.dataset_obj['num_cpu']:
            raise HypervisorError(
                'Not enough CPUs. Destination Hypervisor has {0}, '
                'but VM requires {1}.'.format(self.dataset_obj['num_cpu'],
                                              vm.dataset_obj['num_cpu']))

        # Enough memory?
        free_mib = self.free_vm_memory()
        if vm.dataset_obj['memory'] > free_mib:
            raise HypervisorError(
                'Not enough memory. '
                'Destination Hypervisor has {:.2f} MiB but VM requires {} MiB '
                .format(free_mib, vm.dataset_obj['memory']))

        # Online migration has extra compatibility requirements.
        if not offline:
            # Compatible OS?
            os_pair = (vm.hypervisor.dataset_obj['os'], self.dataset_obj['os'])
            if os_pair not in MIGRATE_CONFIG:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *os_pair))

            # Compatible CPU model?  Map both hardware models to their CPU
            # architectures and require them to match.
            # NOTE(review): if a hardware model is missing from
            # KVM_HWMODEL_TO_CPUMODEL this raises IndexError, not
            # HypervisorError — confirm all models are covered.
            hw_pair = (
                vm.hypervisor.dataset_obj['hardware_model'],
                self.dataset_obj['hardware_model'],
            )
            cpu_pair = [
                arch for arch, models in KVM_HWMODEL_TO_CPUMODEL.items()
                for model in hw_pair if model in models
            ]
            if cpu_pair[0] != cpu_pair[1]:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *hw_pair))

        # Enough disk?
        free_disk_space = self.get_free_disk_size_gib()
        vm_disk_size = float(vm.dataset_obj['disk_size_gib'])
        if vm_disk_size > free_disk_space:
            raise HypervisorError(
                'Not enough free space in VG {} to build VM while keeping'
                ' {} GiB reserved'.format(
                    VG_NAME, RESERVED_DISK[self.get_storage_type()]))

        # Proper VLAN?
        if not self.get_vlan_network(vm.dataset_obj['intern_ip']):
            raise HypervisorError(
                'Hypervisor "{}" does not support route_network "{}".'.format(
                    self.fqdn, vm.dataset_obj['route_network']))