Example #1
def change_address(
        vm_hostname, new_address,
        offline=False, migrate=False, allow_reserved_hv=False,
        offline_transport='drbd',
):
    """Change VMs IP address

    This is done by changing the data in Serveradmin, running Puppet in
    the VM, and rebooting it.
    """

    if not offline:
        raise IGVMError('IP address change can only be performed offline')

    with _get_vm(vm_hostname) as vm:
        new_address = ip_address(new_address)

        if vm.dataset_obj['intern_ip'] == new_address:
            raise ConfigError('New IP address is the same as the old one!')

        if not vm.hypervisor.get_vlan_network(new_address) and not migrate:
            err = 'Current hypervisor does not support new subnet!'
            raise ConfigError(err)

        new_network = Query(
            {
                'servertype': 'route_network',
                'state': 'online',
                'network_type': 'internal',
                'intern_ip': Contains(new_address),
            }
        ).get()['hostname']

        vm_was_running = vm.is_running()

        with Transaction() as transaction:
            if vm_was_running:
                vm.shutdown(
                    transaction=transaction,
                    check_vm_up_on_transaction=False,
                )
            vm.change_address(
                new_address, new_network, transaction=transaction,
            )

            if migrate:
                vm_migrate(
                    vm_object=vm,
                    run_puppet=True, offline=True, no_shutdown=True,
                    allow_reserved_hv=allow_reserved_hv,
                    offline_transport=offline_transport,
                )
            else:
                vm.hypervisor.mount_vm_storage(vm, transaction=transaction)
                vm.run_puppet()
                vm.hypervisor.redefine_vm(vm)
                vm.hypervisor.umount_vm_storage(vm)

            if vm_was_running:
                vm.start()
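
A minimal usage sketch for the variant above, assuming the function lives in igvm's command module (the import path is an assumption; the keyword arguments come from the signature shown):

# Hedged usage sketch; the import path below is an assumption.
from igvm.commands import change_address

# The change must be requested as an offline operation, otherwise the
# function raises IGVMError before touching anything.
change_address(
    'vm1.example.com', '10.0.1.23',
    offline=True,       # mandatory: online address changes are rejected
    migrate=True,       # move the VM if the current HV lacks the new subnet
    offline_transport='drbd',
)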
Example #2
def change_address(vm_hostname, new_address, offline=False):
    """Change VMs IP address

    This is done by changing the data in Serveradmin, running Puppet in
    the VM, and rebooting it.
    """

    if not offline:
        raise IGVMError('IP address change can only be performed offline')

    with _get_vm(vm_hostname) as vm:
        if vm.dataset_obj['igvm_operation_mode'] != 'kvm':
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(
                    vm.dataset_obj['igvm_operation_mode']))

        old_address = vm.dataset_obj['intern_ip']
        new_address = ip_address(new_address)

        if old_address == new_address:
            raise ConfigError('New IP address is the same as the old one!')

        vm_was_running = vm.is_running()

        vm.dataset_obj['intern_ip'] = new_address
        vm.dataset_obj.commit()

        if vm_was_running:
            vm.shutdown()

        try:
            with Transaction() as transaction:
                vm.hypervisor.mount_vm_storage(vm, transaction)
                vm.run_puppet()
                vm.hypervisor.redefine_vm(vm)
                vm.hypervisor.umount_vm_storage(vm)
                if vm_was_running:
                    vm.start()
        except BaseException:
            vm.dataset_obj['intern_ip'] = old_address
            vm.dataset_obj.commit()
            raise
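
The rollback in this variant is manual: the new address is committed to Serveradmin up front, and any failure afterwards, including KeyboardInterrupt (hence except BaseException), re-commits the old address before re-raising. A self-contained sketch of that compensating-commit pattern, with illustrative names:

# Compensating-commit sketch; obj stands in for a commit-able record
# such as vm.dataset_obj above, apply_change for the risky work.
def set_with_rollback(obj, key, new_value, apply_change):
    old_value = obj[key]
    obj[key] = new_value
    obj.commit()              # persist optimistically
    try:
        apply_change()        # e.g. Puppet run + redefine + restart
    except BaseException:     # also catches KeyboardInterrupt/SystemExit
        obj[key] = old_value  # compensate: restore the previous value
        obj.commit()
        raise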
Example #3
def _get_best_hypervisor(vm, hypervisor_states, offline=False):
    hypervisors = (Hypervisor(o) for o in Query(
        {
            'servertype': 'hypervisor',
            'environment': environ.get('IGVM_MODE', 'production'),
            'vlan_networks': vm.dataset_obj['route_network'],
            'state': Any(*hypervisor_states),
        }, HYPERVISOR_ATTRIBUTES))

    for hypervisor in sorted_hypervisors(HYPERVISOR_PREFERENCES, vm,
                                         hypervisors):
        # For performance, the actual resources are not checked during
        # sorting. The hypervisor still has to be validated against the
        # live values before the final decision.
        try:
            hypervisor.acquire_lock()
        except InvalidStateError as error:
            log.warning(error)
            continue

        try:
            hypervisor.check_vm(vm, offline)
        except (libvirtError, HypervisorError) as error:
            hypervisor.release_lock()
            log.warning('Preferred hypervisor "{}" is skipped: {}'.format(
                hypervisor, error))
            continue

        try:
            yield hypervisor
        finally:
            hypervisor.release_lock()
        break
    else:
        raise IGVMError('Cannot find a hypervisor')
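
Because the yield sits inside try/finally, wrapping this generator in contextlib.contextmanager (which its use via es.enter_context(...) in Example #5 suggests, though the decorator itself is not shown here) keeps the hypervisor lock held for the caller's entire with block and releases it on exit. A self-contained sketch of the same yield-under-lock pattern with illustrative stand-ins:

# Yield-under-lock sketch; Resource is an illustrative stand-in for
# Hypervisor, and acquire_lock/release_lock mirror the calls above.
from contextlib import contextmanager
import threading

class Resource:
    def __init__(self, name):
        self.name = name
        self._lock = threading.Lock()

    def acquire_lock(self):
        if not self._lock.acquire(blocking=False):
            raise RuntimeError('{} is busy'.format(self.name))

    def release_lock(self):
        self._lock.release()

@contextmanager
def first_free(resources):
    for resource in resources:
        try:
            resource.acquire_lock()
        except RuntimeError as error:
            print(error)
            continue
        try:
            yield resource  # lock is held for the caller's with block
        finally:
            resource.release_lock()
        break
    else:
        raise LookupError('no free resource found')

# The lock is released automatically when the with block exits.
with first_free([Resource('hv1'), Resource('hv2')]) as res:
    print('working on', res.name)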
Example #4
def _get_best_hypervisor(
        vm,
        hypervisor_states,
        offline=False,
        enforce_vm_env=False,
        soft_preferences=False,
):
    hv_filter = {
        'servertype': 'hypervisor',
        'vlan_networks': vm.route_network,
        'state': Any(*hypervisor_states),
    }

    # IGVM_MODE overrides the environment; it is used for tests
    if 'IGVM_MODE' in environ:
        hv_filter['environment'] = environ.get('IGVM_MODE')
    elif enforce_vm_env:
        hv_filter['environment'] = vm.dataset_obj['environment']

    # Get all (theoretically) possible HVs sorted by HV preferences
    hypervisors = (
        Hypervisor(o) for o in
        Query(hv_filter, HYPERVISOR_ATTRIBUTES)
    )
    hypervisors = sort_by_preference(
        vm,
        HYPERVISOR_PREFERENCES,
        hypervisors,
        soft_preferences,
    )

    possible_hvs = OrderedDict()
    for possible_hv in hypervisors:
        possible_hvs[str(possible_hv)] = possible_hv

    # Check all HVs in parallel. This checks live data on those HVs
    # without locking them, which allows a quick first filtering round.
    # A second round, with locking, follows on the filtered HVs only.
    chunk_size = 10
    iterations = math.ceil(len(possible_hvs) / chunk_size)
    found_hv = None

    # We check HVs in chunks. This lets us pick a suitable HV early
    # without looping through all of them unnecessarily.
    for i in range(iterations):
        start_idx = i * chunk_size
        end_idx = start_idx + chunk_size
        hv_chunk = dict(list(possible_hvs.items())[start_idx:end_idx])

        results = parallel(
            _check_vm,
            identifiers=list(hv_chunk.keys()),
            args=[
                [possible_hv, vm, offline]
                for possible_hv in hv_chunk.values()
            ],
            workers=chunk_size,
        )

        # Remove unsupported HVs from the list
        for checked_hv, success in results.items():
            if not success:
                hv_chunk.pop(checked_hv)

        # Do another checking iteration, this time with HV locking
        for possible_hv in hv_chunk.values():
            try:
                possible_hv.acquire_lock()
            except InvalidStateError as e:
                log.warning(e)
                continue

            if not _check_vm(possible_hv, vm, offline):
                possible_hv.release_lock()
                continue

            # HV found
            found_hv = possible_hv

            break

        if found_hv:
            break

    if not found_hv:
        # No supported HV was found
        raise IGVMError(
            'Automatically finding the best Hypervisor failed! '
            'Cannot find a suitable hypervisor matching the preferences and '
            'the query: {}'.format(hv_filter))

    # Yield the hypervisor locked for working on it
    try:
        log.info('Picked {} as destination Hypervisor'.format(str(found_hv)))
        yield found_hv
    finally:
        found_hv.release_lock()
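
The chunked loop above trades a bit of throughput for an early exit: candidates are pre-filtered ten at a time with unlocked parallel checks, and the search stops at the first chunk that yields a lockable, suitable hypervisor instead of probing every host. A self-contained sketch of that pattern, with cheap_check and locked_check as placeholders for the parallel pre-filter and the locking re-check:

# Chunked early-exit search sketch; the check callables are placeholders.
import math

def first_passing(candidates, cheap_check, locked_check, chunk_size=10):
    for i in range(math.ceil(len(candidates) / chunk_size)):
        chunk = candidates[i * chunk_size:(i + 1) * chunk_size]
        # Quick, unlocked pre-filter over the whole chunk (run in
        # parallel in the real code) ...
        survivors = [c for c in chunk if cheap_check(c)]
        # ... then a serial re-check that also takes and keeps the lock.
        for candidate in survivors:
            if locked_check(candidate):
                return candidate  # stop early; later chunks never run
    return None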
Example #5
def vm_migrate(
        vm_hostname: str = None,
        vm_object=None,
        hypervisor_hostname: Optional[str] = None,
        run_puppet: bool = False,
        debug_puppet: bool = False,
        offline: bool = False,
        offline_transport: str = 'drbd',
        allow_reserved_hv: bool = False,
        no_shutdown: bool = False,
        enforce_vm_env: bool = False,
        disk_size: Optional[int] = None,
        soft_preferences: bool = False,
):
    """Migrate a VM to a new hypervisor."""

    if not (bool(vm_hostname) ^ bool(vm_object)):
        raise IGVMError(
            'Exactly one of vm_hostname or vm_object must be given!'
        )

    with ExitStack() as es:
        if vm_object:
            # The VM was passed as an object and is expected to be locked
            # already by the caller
            _vm = vm_object
        else:
            _vm = es.enter_context(
                _get_vm(vm_hostname, allow_retired=True)
            )

        # We have to check the migration settings before searching for an HV,
        # because the new disk size must be validated and set first
        current_size_gib = _vm.dataset_obj['disk_size_gib']
        _vm.dataset_obj['disk_size_gib'] = _vm.hypervisor.vm_new_disk_size(
            _vm, offline, offline_transport, disk_size
        )

        if hypervisor_hostname:
            hypervisor = es.enter_context(_get_hypervisor(
                hypervisor_hostname, allow_reserved=allow_reserved_hv
            ))
            if _vm.hypervisor.fqdn == hypervisor.fqdn:
                raise IGVMError(
                    'Source and destination hypervisor are the same!'
                )
        else:
            hypervisor = es.enter_context(_get_best_hypervisor(
                _vm,
                ['online', 'online_reserved'] if allow_reserved_hv
                else ['online'],
                offline,
                enforce_vm_env,
                soft_preferences,
            ))

        # After the HV is chosen, disk_size_gib must be restored
        # to pass _check_attributes(_vm)
        _vm.dataset_obj['disk_size_gib'] = current_size_gib

        was_running = _vm.is_running()

        # There is no point in an online migration if the VM is already
        # shut down.
        if not was_running:
            offline = True

        if not offline and run_puppet:
            raise IGVMError('Online migration cannot run Puppet.')

        # Validate that the destination hypervisor can run the VM (this needs
        # to happen after setting the new IP!)
        hypervisor.check_vm(_vm, offline)

        # Require VM to be in sync with serveradmin
        _check_attributes(_vm)

        _vm.check_serveradmin_config()

        with Transaction() as transaction:
            _vm.hypervisor.migrate_vm(
                _vm, hypervisor, offline, offline_transport, transaction,
                no_shutdown, disk_size,
            )

            previous_hypervisor = _vm.hypervisor
            _vm.hypervisor = hypervisor

            def _reset_hypervisor():
                _vm.hypervisor = previous_hypervisor

            transaction.on_rollback('reset hypervisor', _reset_hypervisor)

            if run_puppet:
                hypervisor.mount_vm_storage(_vm, transaction)
                _vm.run_puppet(debug=debug_puppet)
                hypervisor.umount_vm_storage(_vm)

            if offline and was_running:
                _vm.start(transaction=transaction)
            _vm.reset_state()

            # Add migration log entries to hypervisor and previous_hypervisor
            hypervisor.log_migration(_vm, '+')
            transaction.on_rollback(
                'reset hypervisor log',
                hypervisor.log_migration,
                _vm,
                '-',
            )

            previous_hypervisor.log_migration(_vm, '-')
            transaction.on_rollback(
                'reset previous hypervisor log',
                previous_hypervisor.log_migration,
                _vm,
                '+',
            )

            # Update Serveradmin
            _vm.dataset_obj['hypervisor'] = hypervisor.dataset_obj['hostname']
            _vm.dataset_obj.commit()

        # If removing the existing VM fails, we shouldn't risk undoing the
        # newly migrated one.
        previous_hypervisor.undefine_vm(_vm)
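
Note how every side effect inside the Transaction block registers an inverse action via on_rollback, so a failure anywhere in the block unwinds the hypervisor switch and both migration log entries. A simplified, self-contained sketch of such an undo stack (igvm's actual Transaction class is not shown in these examples and may differ):

# Simplified undo-stack sketch; not igvm's actual Transaction class.
class Transaction:
    def __init__(self):
        self._undo = []

    def on_rollback(self, name, func, *args):
        # Remember an inverse action to run if the block fails.
        self._undo.append((name, func, args))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        if exc_type is not None:
            # Run the registered undo actions in reverse order.
            for name, func, args in reversed(self._undo):
                func(*args)
        return False  # never swallow the exception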
Example #6
def vm_migrate(vm_hostname,
               hypervisor_hostname=None,
               run_puppet=False,
               debug_puppet=False,
               offline=False,
               offline_transport='drbd',
               allow_reserved_hv=False,
               no_shutdown=False):
    """Migrate a VM to a new hypervisor."""

    with ExitStack() as es:
        vm = es.enter_context(_get_vm(vm_hostname, allow_retired=True))

        if vm.dataset_obj['igvm_operation_mode'] != 'kvm':
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(
                    vm.dataset_obj['igvm_operation_mode']))

        if hypervisor_hostname:
            hypervisor = es.enter_context(
                _get_hypervisor(hypervisor_hostname,
                                allow_reserved=allow_reserved_hv))
            if vm.hypervisor.fqdn == hypervisor.fqdn:
                raise IGVMError(
                    'Source and destination hypervisor are the same!')
        else:
            hypervisor = es.enter_context(
                _get_best_hypervisor(
                    vm,
                    ['online', 'online_reserved']
                    if allow_reserved_hv else ['online'],
                    offline,
                ))

        was_running = vm.is_running()

        # There is no point in an online migration if the VM is already
        # shut down.
        if not was_running:
            offline = True

        if not offline and run_puppet:
            raise IGVMError('Online migration cannot run Puppet.')

        # Validate that the destination hypervisor can run the VM (this needs
        # to happen after setting the new IP!)
        hypervisor.check_vm(vm, offline)

        # Require VM to be in sync with serveradmin
        _check_attributes(vm)

        vm.check_serveradmin_config()

        with Transaction() as transaction:
            vm.hypervisor.migrate_vm(
                vm,
                hypervisor,
                offline,
                offline_transport,
                transaction,
                no_shutdown,
            )

            previous_hypervisor = vm.hypervisor
            vm.hypervisor = hypervisor

            def _reset_hypervisor():
                vm.hypervisor = previous_hypervisor

            transaction.on_rollback('reset hypervisor', _reset_hypervisor)

            if run_puppet:
                hypervisor.mount_vm_storage(vm, transaction)
                vm.run_puppet(debug=debug_puppet)
                hypervisor.umount_vm_storage(vm)

            if offline and was_running:
                vm.start(transaction=transaction)
            vm.reset_state()

            # Update Serveradmin
            vm.dataset_obj['hypervisor'] = hypervisor.dataset_obj['hostname']
            vm.dataset_obj.commit()

        # If removing the existing VM fails, we shouldn't risk undoing the
        # newly migrated one.
        previous_hypervisor.undefine_vm(vm)
Example #7
def _get_best_hypervisor(vm, hypervisor_states, offline=False):
    hv_env = environ.get('IGVM_MODE', 'production')

    # Get all (theoretically) possible HVs sorted by HV preferences
    hypervisors = (Hypervisor(o) for o in Query(
        {
            'servertype': 'hypervisor',
            'environment': hv_env,
            'vlan_networks': vm.route_network,
            'state': Any(*hypervisor_states),
        }, HYPERVISOR_ATTRIBUTES))
    hypervisors = sorted_hypervisors(HYPERVISOR_PREFERENCES, vm, hypervisors)

    possible_hvs = OrderedDict()
    for possible_hv in hypervisors:
        possible_hvs[str(possible_hv)] = possible_hv

    # Check all HVs in parallel. This checks live data on those HVs
    # without locking them, which allows a quick first filtering round.
    # A second round, with locking, follows on the filtered HVs only.
    results = parallel(
        _check_vm,
        identifiers=list(possible_hvs.keys()),
        args=[[possible_hv, vm, offline]
              for possible_hv in possible_hvs.values()],
    )

    # Remove unsupported HVs from the list
    for checked_hv, success in results.items():
        if not success:
            possible_hvs.pop(checked_hv)

    # Error to raise when no supported HV is found
    not_found_err = IGVMError(
        'Cannot find hypervisor matching environment: {}, '
        'states: {}, vlan_network: {}, offline: {}'.format(
            hv_env,
            ', '.join(hypervisor_states),
            vm.route_network,
            offline,
        ))

    if len(possible_hvs) == 0:
        raise not_found_err

    # Do another checking iteration, this time with HV locking
    for possible_hv in possible_hvs.values():
        try:
            possible_hv.acquire_lock()
        except InvalidStateError as e:
            log.warning(e)
            continue

        if not _check_vm(possible_hv, vm, offline):
            possible_hv.release_lock()
            continue

        try:
            yield possible_hv
            break
        finally:
            possible_hv.release_lock()
    else:
        raise not_found_err
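
The closing for/else does real work here: the else branch runs only when the loop finishes without a break, so the pre-built not_found_err is raised both when the parallel filter leaves no candidates and when every remaining candidate fails to lock or re-check. A tiny illustration of the construct:

# Python's for/else as used above: else runs only without a break.
for candidate in ['a', 'b']:
    if candidate == 'c':
        break  # success path would skip the else
else:
    raise LookupError('no candidate matched')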