Example #1
0
    def acquire_lock(self, allow_fail=False):
        """Mark this server as locked by igvm on Serveradmin.

        Raises InvalidStateError if the server is already locked, or if
        committing the lock fails because another igvm raced us for it.

        NOTE(review): the allow_fail parameter is accepted but never used
        in this method -- TODO confirm whether lock failures should be
        tolerated when allow_fail is True.
        """
        already_locked_msg = (
            'Server "{0}" is already being worked on by another igvm'
            .format(self.dataset_obj['hostname'])
        )

        if self.dataset_obj['igvm_locked'] is not None:
            raise InvalidStateError(already_locked_msg)

        # The commit below is what actually publishes (and races for)
        # the lock on Serveradmin.
        self.dataset_obj['igvm_locked'] = datetime.utcnow()
        try:
            self.dataset_obj.commit()
        except DatasetError as err:
            # Chain the original error so the commit failure stays visible.
            raise InvalidStateError(already_locked_msg) from err
Example #2
0
def vm_delete(vm_hostname, retire=False):
    """Delete the VM from the hypervisor and from serveradmin

    If retire is True the VM will not be deleted from serveradmin but its
    state will be updated to 'retired'.

    Raises InvalidStateError if the VM is still running and
    NotImplementedError for unknown datacenter types.
    """

    with _get_vm(vm_hostname, unlock=retire, allow_retired=True) as vm:
        if vm.dataset_obj['datacenter_type'] == 'aws.dct':
            vm_status_code = vm.aws_describe_instance_status(
                vm.dataset_obj['aws_instance_id'])
            # Refuse to delete a running instance.
            if vm_status_code != AWS_RETURN_CODES['stopped']:
                raise InvalidStateError('"{}" is still running.'.format(
                    vm.fqdn))
            # Instance is stopped, safe to delete.
            vm.aws_delete()
        elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':
            # Make sure the VM has a hypervisor and that it is defined on it.
            # Abort if the VM has not been defined.
            _check_defined(vm)

            # Make sure the VM is shut down, abort if it is not.
            if vm.hypervisor and vm.hypervisor.vm_defined(
                    vm) and vm.is_running():
                raise InvalidStateError('"{}" is still running.'.format(
                    vm.fqdn))

            # Delete the VM from its hypervisor if required.
            if vm.hypervisor and vm.hypervisor.vm_defined(vm):
                vm.hypervisor.undefine_vm(vm)
        else:
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(
                    vm.dataset_obj['datacenter_type']))

        # Delete the machine's cert from Puppet in case we want to build
        # one with the same name in the future.
        clean_cert(vm.dataset_obj)

        # Delete the serveradmin object of this VM
        # or update its state to 'retired' if retire is True.
        if retire:
            vm.dataset_obj['state'] = 'retired'
            vm.dataset_obj.commit()
            log.info('"{}" is destroyed and set to "retired" state.'.format(
                vm.fqdn))
        else:
            vm.dataset_obj.delete()
            vm.dataset_obj.commit()
            log.info('"{}" is destroyed and deleted from Serveradmin'.format(
                vm.fqdn))
Example #3
0
def vm_delete(vm_hostname, retire=False):
    """Remove a VM from its hypervisor and from Serveradmin.

    When retire is True the Serveradmin object is kept and its state
    attribute is set to 'retired' instead of deleting the object.
    """

    with _get_vm(vm_hostname, unlock=retire, allow_retired=True) as vm:
        # Refuse to work on VMs that were never built.
        _check_defined(vm)

        # A powered-on VM must be shut down before deletion.
        if vm.hypervisor and vm.hypervisor.vm_defined(vm) and vm.is_running():
            raise InvalidStateError('"{}" is still running.'.format(vm.fqdn))

        # Drop the domain from the hypervisor if one exists.
        if vm.hypervisor and vm.hypervisor.vm_defined(vm):
            vm.hypervisor.undefine_vm(vm)

        # Either retire the Serveradmin object or delete it for good.
        if retire:
            vm.dataset_obj['state'] = 'retired'
            vm.dataset_obj.commit()
            log.info('"{}" is destroyed and set to "retired" state.'.format(
                vm.fqdn))
        else:
            vm.dataset_obj.delete()
            vm.dataset_obj.commit()
            log.info('"{}" is destroyed and deleted from Serveradmin'.format(
                vm.fqdn))
Example #4
0
def vm_restart(vm_hostname, force=False, no_redefine=False):
    """Restart a VM

    The VM is shut down and recreated, using the existing disk. This can be
    useful to discard temporary changes or adapt new hypervisor optimizations.
    No data will be lost.
    """
    with _get_vm(vm_hostname) as vm:
        _check_defined(vm)

        # Restarting a stopped VM makes no sense; use vm_start instead.
        if not vm.is_running():
            raise InvalidStateError('"{}" is not running'.format(vm.fqdn))

        # Stop the domain, forcefully if requested.
        if force:
            vm.hypervisor.stop_vm_force(vm)
        else:
            vm.shutdown()

        # Rebuild the domain definition unless explicitly disabled.
        if not no_redefine:
            vm.hypervisor.redefine_vm(vm)

        vm.start()

        log.info('"{}" is restarted.'.format(vm.fqdn))
Example #5
0
    def __init__(self, dataset_obj):
        """Wrap a Serveradmin hypervisor object, refusing retired ones."""
        super(Hypervisor, self).__init__(dataset_obj)

        # Retired hypervisors must never be operated on.
        if dataset_obj['state'] == 'retired':
            raise InvalidStateError(
                'Hypervisor "{0}" is retired.'.format(self.fqdn))

        # Lazily populated caches.
        self._mount_path = {}
        self._storage_type = None
        self._storage_pool = None
Example #6
0
    def format_vm_storage(self, vm, transaction=None):
        """Create a fresh filesystem for a VM and mount it.

        Returns the mount path on the hypervisor.
        """
        # Never wipe the disk of a VM that is already defined.
        if self.vm_defined(vm):
            raise InvalidStateError(
                'Refusing to format storage of defined VM "{}".'.format(
                    vm.fqdn))

        volume_path = self.get_volume_by_vm(vm).path()
        self.format_storage(volume_path)
        return self.mount_vm_storage(vm, transaction)
Example #7
0
    def vm_set_memory(self, vm, memory):
        """Set the memory of a VM to the given amount in MiB.

        Shrinking is only possible while the VM is offline.  Raises
        InvalidStateError or HypervisorError when the change cannot be
        performed or verified.
        """
        self._check_committed(vm)
        vm.check_serveradmin_config()
        self._check_attribute_synced(vm, 'memory')

        running = self.vm_running(vm)

        # Online shrinking of domain memory is not supported.
        if running and memory < vm.dataset_obj['memory']:
            raise InvalidStateError(
                'Cannot shrink memory while VM is running'
            )
        if self.free_vm_memory() < memory - vm.dataset_obj['memory']:
            raise HypervisorError('Not enough free memory on hypervisor.')

        log.info(
            'Changing memory of "{}" on "{}" from {} MiB to {} MiB'
            .format(vm.fqdn, self.fqdn, vm.dataset_obj['memory'], memory)
        )

        vm.dataset_obj['memory'] = memory
        vm.check_serveradmin_config()

        # If VM is offline, we can just rebuild the domain
        if not running:
            log.info('VM is offline, rebuilding domain with new settings')
            self.redefine_vm(vm)
            vm.dataset_obj.commit()
        else:
            old_total = vm.meminfo()['MemTotal']
            set_memory(self, vm, self._get_domain(vm))
            vm.dataset_obj.commit()

            # Hypervisor might take some time to propagate memory changes,
            # wait until MemTotal changes.
            retry_wait_backoff(
                lambda: vm.meminfo()['MemTotal'] != old_total,
                'New memory is not visible to virtual machine. Note that we '
                'can not online decrease the domains memory. The libvirt '
                'and serveradmin changes will therefore not be rolled back.',
                max_wait=40
            )

        # Validate changes, if possible.
        current_memory = self.vm_sync_from_hypervisor(vm).get('memory', memory)
        if current_memory != memory:
            # BUG FIX: the implicit string concatenation previously joined
            # 'changed has' and 'failed' without a separating space,
            # producing "hasfailed" in the error message.
            raise HypervisorError(
                'Warning: The sanity check to see if libvirt reports the '
                'updated amount of memory for the domain we just changed '
                'has failed. Note that we can not online decrease the '
                'domains memory. The libvirt and serveradmin changes will '
                'therefore not be rolled back.'
            )
Example #8
0
    def undefine_vm(self, vm, keep_storage=False):
        """Remove the VM's domain (and optionally its storage) from this
        hypervisor."""
        # A running domain must not be undefined.
        if self.vm_running(vm):
            raise InvalidStateError(
                'Refusing to undefine running VM "{}"'.format(vm.fqdn))

        log.info('Undefining "{}" on "{}"'.format(vm.fqdn, self.fqdn))

        if not keep_storage:
            # XXX: get_volume_by_vm depends on domain names to find legacy
            # domains w/o an uid_name.  The order is therefore important.
            self.get_volume_by_vm(vm).delete()

        # A non-zero return value signals failure to undefine the domain.
        result = self._get_domain(vm).undefine()
        if result != 0:
            raise HypervisorError('Unable to undefine "{}".'.format(vm.fqdn))
Example #9
0
def _get_vm(hostname, unlock=True, allow_retired=False):
    """Get a server from Serveradmin by hostname to return VM object

    The function is accepting hostnames in any length as long as it resolves
    to a single server on Serveradmin.

    Yields a locked VM object.  The igvm lock is released again on exit
    unless unlock is False (needed when the object gets deleted, as a
    deleted object cannot be unlocked).
    """

    # Resolve the (possibly abbreviated) hostname to exactly one object.
    object_id = Query({
        'hostname': Any(hostname, StartsWith(hostname + '.')),
        'servertype': 'vm',
    }, ['object_id']).get()['object_id']

    def vm_query():
        # Re-fetch the full object by its immutable object_id.
        return Query({
            'object_id': object_id,
        }, VM_ATTRIBUTES).get()

    dataset_obj = vm_query()

    hypervisor = None
    if dataset_obj['hypervisor']:
        hypervisor = Hypervisor(dataset_obj['hypervisor'])

        # XXX: Ugly hack until adminapi supports modifying joined objects
        dict.__setitem__(
            dataset_obj, 'hypervisor', dataset_obj['hypervisor']['hostname']
        )

    vm = VM(dataset_obj, hypervisor)
    vm.acquire_lock()

    try:
        # The retired check happens after locking on purpose: if it fails,
        # the except branch below releases the lock again.
        if not allow_retired and dataset_obj['state'] == 'retired':
            raise InvalidStateError(
                'VM {} is in state retired, I refuse to work on it!'.format(
                    hostname,
                )
            )
        yield vm
    except (Exception, KeyboardInterrupt):
        VM(vm_query(), hypervisor).release_lock()
        raise
    else:
        # We re-fetch the VM because we can't risk committing any other
        # changes to the VM than unlocking. There can be changes from
        # failed things, like setting memory.
        # Most operations require unlocking, the only exception is deleting of
        # a VM. After object is deleted, it can't be unlocked.
        if unlock:
            VM(vm_query(), hypervisor).release_lock()
Example #10
0
def vm_start(vm_hostname, unretire=None):
    """Start a VM"""
    with _get_vm(vm_hostname) as vm:
        # Unretiring is only valid for VMs currently in "retired" state.
        if unretire and vm.dataset_obj['state'] != 'retired':
            raise InvalidStateError("Can't unretire a non-retired VM!")

        _check_defined(vm)

        # Nothing to do when the VM is up already.
        if vm.is_running():
            log.info('"{}" is already running.'.format(vm.fqdn))
            return

        vm.start()

        if unretire:
            # unretire carries the target state value to commit.
            vm.dataset_obj['state'] = unretire
            vm.dataset_obj.commit()
Example #11
0
def _check_defined(vm, fail_hard=True):
    error = None

    if not vm.hypervisor:
        error = ('"{}" has no hypervisor defined. Use --force to ignore this'
                 .format(vm.fqdn))
    elif not vm.hypervisor.vm_defined(vm):
        error = ('"{}" is not built yet or is not running on "{}"'
                 .format(vm.fqdn, vm.hypervisor.fqdn))

    if error:
        if fail_hard:
            raise InvalidStateError(error)
        else:
            log.info(error)
Example #12
0
def vm_build(
        vm_hostname: str,
        run_puppet: bool = True,
        debug_puppet: bool = False,
        postboot: Optional[str] = None,
        allow_reserved_hv: bool = False,
        rebuild: bool = False,
        enforce_vm_env: bool = False,
        soft_preferences: bool = False,
):
    """Create a VM and start it

    Puppet is run once to configure baseline networking.
    """

    with ExitStack() as es:
        vm = es.enter_context(_get_vm(vm_hostname))

        if vm.hypervisor:
            # The VM already has a hypervisor assigned; just lock it.
            es.enter_context(_lock_hv(vm.hypervisor))
        else:
            # Pick the best matching hypervisor and record it on the VM.
            hv_states = (
                ['online', 'online_reserved'] if allow_reserved_hv
                else ['online']
            )
            vm.hypervisor = es.enter_context(_get_best_hypervisor(
                vm,
                hv_states,
                True,
                enforce_vm_env,
                soft_preferences,
            ))
            vm.dataset_obj['hypervisor'] = \
                vm.hypervisor.dataset_obj['hostname']

        # Never touch a running VM.
        if vm.hypervisor.vm_defined(vm) and vm.is_running():
            raise InvalidStateError(
                '"{}" is still running.'.format(vm.fqdn)
            )

        # For a rebuild, drop the existing domain first.
        if rebuild and vm.hypervisor.vm_defined(vm):
            vm.hypervisor.undefine_vm(vm)

        vm.build(
            run_puppet=run_puppet,
            debug_puppet=debug_puppet,
            postboot=postboot,
            cleanup_cert=rebuild,
        )

        vm.dataset_obj.commit()
Example #13
0
    def format_vm_storage(self, vm, transaction=None):
        """Create a fresh filesystem for a VM and mount it.

        Returns the mount path on the hypervisor.
        """
        # Never wipe the disk of a VM that is already defined.
        if self.vm_defined(vm):
            raise InvalidStateError(
                'Refusing to format storage of defined VM "{}".'.format(
                    vm.fqdn))

        # mkfs parameters depend on the guest OS.
        vm_os = vm.dataset_obj['os']
        mkfs_options = XFS_CONFIG.get(vm_os)
        if not mkfs_options:
            raise ConfigError('No mkfs options defined for OS {}'.format(
                vm_os))

        self.format_storage(self.get_volume_by_vm(vm).path(), mkfs_options)
        return self.mount_vm_storage(vm, transaction)
Example #14
0
    def mount_vm_storage(self, vm, transaction=None):
        """Mount VM filesystem on host and return mount point."""
        # Already mounted?  Reuse the cached path.
        if vm in self._mount_path:
            return self._mount_path[vm]

        # Mounting the filesystem of a powered-on VM would corrupt it.
        if self.vm_defined(vm) and self.vm_running(vm):
            raise InvalidStateError(
                'Refusing to mount VM filesystem while VM is powered on')

        mount_point = self.mount_temp(
            self.get_volume_by_vm(vm).path(), suffix='-' + vm.fqdn)
        self._mount_path[vm] = mount_point

        if transaction:
            transaction.on_rollback(
                'unmount storage', self.umount_vm_storage, vm)

        vm.mounted = True
        return mount_point
Example #15
0
def _get_hypervisor(hostname, allow_reserved=False):
    """Fetch a hypervisor from Serveradmin by hostname and yield it locked.

    The igvm lock is acquired before yielding and always released
    afterwards, even when the caller's block raises.
    """
    dataset_obj = Query({
        'hostname': hostname,
        'servertype': 'hypervisor',
    }, HYPERVISOR_ATTRIBUTES).get()

    # Reserved hypervisors must be requested explicitly.
    state = dataset_obj['state']
    if state == 'online_reserved' and not allow_reserved:
        raise InvalidStateError('Server "{0}" is online_reserved.'.format(
            dataset_obj['hostname']))

    hypervisor = Hypervisor(dataset_obj)
    hypervisor.acquire_lock()

    try:
        yield hypervisor
    finally:
        hypervisor.release_lock()
Example #16
0
def vm_start(vm_hostname, unretire=None):
    """Start a VM"""
    with _get_vm(vm_hostname) as vm:
        # Unretiring is only valid for VMs currently in "retired" state.
        if unretire and vm.dataset_obj['state'] != 'retired':
            raise InvalidStateError("Can't unretire a non-retired VM!")

        dc_type = vm.dataset_obj['datacenter_type']
        if dc_type == 'aws.dct':
            vm.aws_start()
        elif dc_type == 'kvm.dct':
            _check_defined(vm)
            # Nothing to do when the VM is up already.
            if vm.is_running():
                log.info('"{}" is already running.'.format(vm.fqdn))
                return
            vm.start()
        else:
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(dc_type))

        if unretire:
            # unretire carries the target state value to commit.
            vm.dataset_obj['state'] = unretire
            vm.dataset_obj.commit()
Example #17
0
def vm_build(
    vm_hostname: str,
    run_puppet: bool = True,
    debug_puppet: bool = False,
    postboot: Optional[str] = None,
    allow_reserved_hv: bool = False,
    rebuild: bool = False,
    enforce_vm_env: bool = False,
    soft_preferences: bool = False,
):
    """Create a VM and start it

    Puppet is run once to configure baseline networking.

    Raises InvalidStateError if the VM is already running and
    NotImplementedError for unknown datacenter types.
    """

    with ExitStack() as es:
        vm = es.enter_context(_get_vm(vm_hostname))

        if vm.dataset_obj['datacenter_type'] == 'aws.dct':
            # Render cloud-init user data from the project template.
            jenv = Environment(loader=PackageLoader('igvm', 'templates'))
            template = jenv.get_template('aws_user_data.cfg')
            user_data = template.render(
                hostname=vm.dataset_obj['hostname'].replace('.ig.local', ''),
                fqdn=vm.dataset_obj['hostname'],
                vm_os=vm.dataset_obj['os'],
                apt_repos=AWS_CONFIG[0]['apt'],
                puppet_master=vm.dataset_obj['puppet_master'],
                puppet_ca=vm.dataset_obj['puppet_ca'],
            )

            if rebuild:
                vm.aws_delete()

                # BUG FIX: the status was previously queried only once
                # before the loop, so the loop either exited immediately
                # or slept for the full timeout regardless of the actual
                # instance state.  Re-query the status every iteration
                # and stop as soon as the instance is terminated.
                timeout_terminate = 60
                while timeout_terminate:
                    instance_status = vm.aws_describe_instance_status(
                        vm.dataset_obj['aws_instance_id'])
                    if instance_status == AWS_RETURN_CODES['terminated']:
                        break
                    timeout_terminate -= 1
                    sleep(1)

            vm.aws_build(run_puppet=run_puppet,
                         debug_puppet=debug_puppet,
                         postboot=user_data)

            # Mirror the freshly created instance's attributes back into
            # the Serveradmin object.
            attributes = vm.aws_sync()
            for attr, val in attributes.items():
                vm.dataset_obj[attr] = val
        elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':
            if vm.hypervisor:
                es.enter_context(_lock_hv(vm.hypervisor))
            else:
                # No hypervisor assigned yet; pick the best one and
                # record it on the Serveradmin object.
                vm.hypervisor = es.enter_context(
                    _get_best_hypervisor(
                        vm,
                        ['online', 'online_reserved']
                        if allow_reserved_hv else ['online'],
                        True,
                        enforce_vm_env,
                        soft_preferences,
                    ))
                vm.dataset_obj['hypervisor'] = \
                    vm.hypervisor.dataset_obj['hostname']

            if vm.hypervisor.vm_defined(vm) and vm.is_running():
                raise InvalidStateError('"{}" is still running.'.format(
                    vm.fqdn))

            if rebuild and vm.hypervisor.vm_defined(vm):
                vm.hypervisor.undefine_vm(vm)

            vm.build(
                run_puppet=run_puppet,
                debug_puppet=debug_puppet,
                postboot=postboot,
                cleanup_cert=rebuild,
            )
        else:
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(
                    vm.dataset_obj['datacenter_type']))

        vm.dataset_obj.commit()
Example #18
0
    def check_vm(self, vm, offline):
        """Check whether a VM can run on this hypervisor

        Raises InvalidStateError or HypervisorError when any precondition
        (hypervisor state, CPU, memory, disk, online-migration
        compatibility or network) is not met.
        """
        if self.dataset_obj['state'] not in ['online', 'online_reserved']:
            raise InvalidStateError(
                'Hypervisor "{}" is not in online state ({}).'.format(
                    self.fqdn, self.dataset_obj['state']))

        if self.vm_defined(vm):
            raise HypervisorError('VM "{}" is already defined on "{}".'.format(
                vm.fqdn, self.fqdn))

        # Enough CPUs?
        if vm.dataset_obj['num_cpu'] > self.dataset_obj['num_cpu']:
            raise HypervisorError(
                'Not enough CPUs. Destination Hypervisor has {0}, '
                'but VM requires {1}.'.format(self.dataset_obj['num_cpu'],
                                              vm.dataset_obj['num_cpu']))

        # Enough memory?
        free_mib = self.free_vm_memory()
        if vm.dataset_obj['memory'] > free_mib:
            raise HypervisorError(
                'Not enough memory. '
                'Destination Hypervisor has {:.2f} MiB but VM requires {} MiB '
                .format(free_mib, vm.dataset_obj['memory']))

        # Online migration needs additional compatibility checks.
        if not offline:
            # Compatible OS?
            os_pair = (vm.hypervisor.dataset_obj['os'], self.dataset_obj['os'])
            if os_pair not in MIGRATE_CONFIG:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *os_pair))

            # Compatible CPU model?
            hw_pair = (
                vm.hypervisor.dataset_obj['hardware_model'],
                self.dataset_obj['hardware_model'],
            )
            # Map both hardware models to their CPU architecture family.
            cpu_pair = [
                arch for arch, models in KVM_HWMODEL_TO_CPUMODEL.items()
                for model in hw_pair if model in models
            ]
            if cpu_pair[0] != cpu_pair[1]:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *hw_pair))

        # Enough disk?
        free_disk_space = self.get_free_disk_size_gib()
        vm_disk_size = float(vm.dataset_obj['disk_size_gib'])
        if vm_disk_size > free_disk_space:
            raise HypervisorError(
                'Not enough free space in VG {} to build VM while keeping'
                ' {} GiB reserved'.format(
                    VG_NAME, RESERVED_DISK[self.get_storage_type()]))

        # Proper VLAN?
        # NOTE(review): the check is keyed on intern_ip but the error
        # message reports route_network -- confirm the message matches
        # the intended semantics.
        if not self.get_vlan_network(vm.dataset_obj['intern_ip']):
            raise HypervisorError(
                'Hypervisor "{}" does not support route_network "{}".'.format(
                    self.fqdn, vm.dataset_obj['route_network']))
Example #19
0
    def check_vm(self, vm, offline):
        """Check whether a VM can run on this hypervisor

        Raises InvalidStateError or HypervisorError when any precondition
        (migration compatibility, hypervisor state, CPU, network, memory
        or disk) is not met.
        """
        # Cheap checks should always be executed first to save time
        # and fail early. Same goes for checks that are more likely to fail.

        # Immediately check whether HV is even supported.
        if not offline:
            # Compatible OS?
            os_pair = (vm.hypervisor.dataset_obj['os'], self.dataset_obj['os'])
            if os_pair not in MIGRATE_CONFIG:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *os_pair))

            # Compatible CPU model?
            hw_pair = (
                vm.hypervisor.dataset_obj['hardware_model'],
                self.dataset_obj['hardware_model'],
            )
            # Map both hardware models to their CPU architecture family.
            cpu_pair = [
                arch for arch, models in KVM_HWMODEL_TO_CPUMODEL.items()
                for model in hw_pair if model in models
            ]
            if cpu_pair[0] != cpu_pair[1]:
                raise HypervisorError(
                    '{} to {} migration is not supported online.'.format(
                        *hw_pair))

        # HV in supported state?
        if self.dataset_obj['state'] not in ['online', 'online_reserved']:
            raise InvalidStateError(
                'Hypervisor "{}" is not in online state ({}).'.format(
                    self.fqdn, self.dataset_obj['state']))

        # Enough CPUs?
        if vm.dataset_obj['num_cpu'] > self.dataset_obj['num_cpu']:
            raise HypervisorError(
                'Not enough CPUs. Destination Hypervisor has {0}, '
                'but VM requires {1}.'.format(self.dataset_obj['num_cpu'],
                                              vm.dataset_obj['num_cpu']))

        # Proper VLAN?
        # NOTE(review): the check is keyed on intern_ip while the error
        # message reports route_network -- confirm the message matches
        # the intended semantics.
        if not self.get_vlan_network(vm.dataset_obj['intern_ip']):
            raise HypervisorError(
                'Hypervisor "{}" does not support route_network "{}".'.format(
                    self.fqdn, vm.route_network))

        # Those checks below all require libvirt connection,
        # so execute them last to avoid unnecessary overhead if possible.

        # Enough memory?
        free_mib = self.free_vm_memory()
        if vm.dataset_obj['memory'] > free_mib:
            raise HypervisorError(
                'Not enough memory. '
                'Destination Hypervisor has {:.2f} MiB but VM requires {} MiB '
                .format(free_mib, vm.dataset_obj['memory']))

        # Enough disk?
        free_disk_space = self.get_free_disk_size_gib()
        vm_disk_size = float(vm.dataset_obj['disk_size_gib'])
        if vm_disk_size > free_disk_space:
            raise HypervisorError(
                'Not enough free space in VG {} to build VM while keeping'
                ' {} GiB reserved'.format(
                    VG_NAME, RESERVED_DISK[self.get_storage_type()]))

        # VM already defined? Least likely, if at all.
        if self.vm_defined(vm):
            raise HypervisorError('VM "{}" is already defined on "{}".'.format(
                vm.fqdn, self.fqdn))
Example #20
0
def vm_build(vm_hostname,
             run_puppet=True,
             debug_puppet=False,
             postboot=None,
             allow_reserved_hv=False,
             rebuild=False):
    """Create a VM and start it

    Puppet is run once to configure baseline networking.

    Raises InvalidStateError if the VM is already running and
    NotImplementedError for unknown operation modes.
    """

    with ExitStack() as es:
        vm = es.enter_context(_get_vm(vm_hostname))

        if vm.dataset_obj['igvm_operation_mode'] == 'aws':
            # Render cloud-init user data from the project template.
            jenv = Environment(loader=PackageLoader('igvm', 'templates'))
            template = jenv.get_template('aws_user_data.cfg')
            user_data = template.render(
                # BUG FIX: rstrip('.ig.local') strips any trailing run of
                # the characters ".iglocal" rather than the literal
                # suffix, corrupting hostnames ending in those letters.
                # Use replace() like the newer code paths do.
                hostname=vm.dataset_obj['hostname'].replace('.ig.local', ''),
                fqdn=vm.dataset_obj['hostname'],
                apt_repos=AWS_CONFIG[0]['apt'],
                puppet_master_addr=AWS_CONFIG[0]['puppet']['master_addr'],
                puppet_master=vm.dataset_obj['puppet_master'],
                puppet_ca=vm.dataset_obj['puppet_ca'],
                puppet_ca_addr=AWS_CONFIG[0]['puppet']['ca_addr'],
            )

            vm.aws_build(run_puppet=run_puppet,
                         debug_puppet=debug_puppet,
                         postboot=user_data)
        elif vm.dataset_obj['igvm_operation_mode'] == 'kvm':
            if vm.hypervisor:
                es.enter_context(_lock_hv(vm.hypervisor))
            else:
                # No hypervisor assigned yet; pick the best one and
                # record it on the Serveradmin object.
                vm.hypervisor = es.enter_context(
                    _get_best_hypervisor(
                        vm,
                        ['online', 'online_reserved']
                        if allow_reserved_hv else ['online'],
                        True,
                    ))
                vm.dataset_obj['hypervisor'] = \
                    vm.hypervisor.dataset_obj['hostname']

            if vm.hypervisor.vm_defined(vm) and vm.is_running():
                raise InvalidStateError('"{}" is still running.'.format(
                    vm.fqdn))

            if rebuild and vm.hypervisor.vm_defined(vm):
                vm.hypervisor.undefine_vm(vm)

            vm.build(
                run_puppet=run_puppet,
                debug_puppet=debug_puppet,
                postboot=postboot,
            )
        else:
            raise NotImplementedError(
                'This operation is not yet supported for {}'.format(
                    vm.dataset_obj['igvm_operation_mode']))

        vm.dataset_obj.commit()