Code example #1
    def get_snapshot_xml(self, snap_info):
        """Libvirt snapshot XML"""
        if 'diskType' in snap_info:
            if self.diskType != snap_info['diskType']:
                raise exception.UnsupportedOperation(
                    "Unexpected diskType",
                    drive_disk_type=self.diskType,
                    snapshot_disk_type=snap_info["diskType"])

        if self.diskType == DISK_TYPE.NETWORK:
            if self.protocol != snap_info['protocol']:
                raise exception.UnsupportedOperation(
                    "Unexpected protocol",
                    drive_protocol=self.protocol,
                    snapshot_protocol=snap_info["protocol"])

        disk = vmxml.Element('disk',
                             name=self.name,
                             snapshot='external',
                             type=self.diskType)

        drive_info = snap_info.copy()
        drive_info["diskType"] = self.diskType
        snap_elem = _getSourceXML(drive_info)

        # Type attribute is needed but not documented:
        # https://bugzilla.redhat.com/1452103
        snap_elem.setAttrs(type=self.diskType)

        disk.appendChild(snap_elem)
        return disk
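
Note: for a hypothetical file-based drive named 'vda' whose snap_info carries the
path '/images/snap.qcow2', the returned element would serialize roughly as below
(a sketch, not captured output; the seclabel child that _getSourceXML may add via
disable_dynamic_ownership is omitted). Note the extra type attribute on <source>,
per the bugzilla link in the code:

<disk name='vda' snapshot='external' type='file'>
    <source file='/images/snap.qcow2' type='file'/>
</disk>
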
Code example #2
def _getDriverXML(drive):
    driver = vmxml.Element('driver')
    driverAttrs = {'name': 'qemu'}

    if drive['diskType'] == DISK_TYPE.BLOCK:
        driverAttrs['io'] = 'native'
    else:
        driverAttrs['io'] = 'threads'

    if drive['format'] == 'cow':
        driverAttrs['type'] = 'qcow2'
    elif drive['format']:
        driverAttrs['type'] = 'raw'

    if 'discard' in drive and drive['discard']:
        driverAttrs['discard'] = 'unmap'

    try:
        driverAttrs['iothread'] = str(drive['specParams']['pinToIoThread'])
    except KeyError:
        pass

    driverAttrs['cache'] = drive['cache']

    if (drive['propagateErrors'] == 'on' or
            conv.tobool(drive['propagateErrors'])):
        driverAttrs['error_policy'] = 'enospace'
    else:
        driverAttrs['error_policy'] = 'stop'

    driver.setAttrs(**driverAttrs)
    return driver
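
Note: a minimal standalone sketch of the attribute mapping above, using only the
standard library's ElementTree instead of vdsm's vmxml wrapper; the drive dict
and all of its values are hypothetical:

import xml.etree.ElementTree as ET

# Hypothetical drive description; the keys mirror those read by
# _getDriverXML above.
drive = {
    'diskType': 'file',        # not BLOCK, so io='threads'
    'format': 'cow',           # mapped to type='qcow2' ('raw' otherwise)
    'discard': True,           # mapped to discard='unmap'
    'specParams': {},          # no 'pinToIoThread', so no iothread attribute
    'cache': 'none',
    'propagateErrors': 'off',  # mapped to error_policy='stop'
}

driver = ET.Element('driver', {
    'name': 'qemu',
    'io': 'threads',
    'type': 'qcow2',
    'discard': 'unmap',
    'cache': drive['cache'],
    'error_policy': 'stop',
})
print(ET.tostring(driver).decode())
# <driver name="qemu" io="threads" type="qcow2" discard="unmap"
#   cache="none" error_policy="stop" />  (attribute order may vary)
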
Code example #3
def _getSourceXML(drive):
    """
    Make a libvirt <source> element for the specified drive.

    Arguments:
        drive (dict-like): Drive description; a dict or an object
        implementing __getitem__.

    Returns:
        Element: libvirt source element in a form of
                 <source file='/image'/>
    """
    needs_seclabel = False

    source = vmxml.Element('source')
    if drive["diskType"] == DISK_TYPE.BLOCK:
        needs_seclabel = True
        source.setAttrs(dev=drive["path"])
    elif drive["diskType"] == DISK_TYPE.NETWORK:
        source.setAttrs(protocol=drive["protocol"], name=drive["path"])
        for host in drive["hosts"]:
            source.appendChildWithArgs('host', **host)
    elif drive["diskType"] == DISK_TYPE.FILE:
        needs_seclabel = True
        source.setAttrs(file=drive["path"])
        if drive["device"] == 'cdrom' or drive["device"] == 'floppy':
            source.setAttrs(startupPolicy='optional')
    else:
        raise RuntimeError("Unsupported diskType %r" % drive["diskType"])

    if needs_seclabel:
        disable_dynamic_ownership(source)

    return source
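
Note: the three <source> shapes this function produces, with hypothetical paths
and hosts (the seclabel child added by disable_dynamic_ownership for block and
file disks is omitted):

<!-- DISK_TYPE.BLOCK -->
<source dev='/dev/vgname/lvname'/>

<!-- DISK_TYPE.NETWORK -->
<source protocol='gluster' name='volname/imgname'>
    <host name='10.0.0.1' port='24007' transport='tcp'/>
</source>

<!-- DISK_TYPE.FILE; startupPolicy only for cdrom/floppy devices -->
<source file='/images/disk.img' startupPolicy='optional'/>
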
Code example #4
File: libvirtxml.py  Project: olegtikhonov/vdsm
    def __init__(self, conf, log, arch):
        """
        Create the skeleton of a libvirt domain xml

        <domain type="kvm">
            <name>vmName</name>
            <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
            <memory>262144</memory>
            <currentMemory>262144</currentMemory>
            <vcpu current='smp'>160</vcpu>
            <devices>
            </devices>
        </domain>

        """
        self.conf = conf
        self.log = log

        self.arch = arch

        if conv.tobool(self.conf.get('kvmEnable', 'true')):
            domainType = 'kvm'
        else:
            domainType = 'qemu'

        domainAttrs = {'type': domainType}

        self.dom = vmxml.Element('domain', **domainAttrs)

        self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
        self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
        if 'numOfIoThreads' in self.conf:
            self.dom.appendChildWithArgs('iothreads',
                                         text=str(self.conf['numOfIoThreads']))
        memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
        self.dom.appendChildWithArgs('memory', text=memSizeKB)
        self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
        if 'maxMemSize' in self.conf:
            maxMemSizeKB = str(int(self.conf['maxMemSize']) * 1024)
            maxMemSlots = str(self.conf.get('maxMemSlots', '16'))
            self.dom.appendChildWithArgs('maxMemory', text=maxMemSizeKB,
                                         slots=maxMemSlots)
        vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
        vcpu.setAttrs(**{'current': self._getSmp()})

        self._devices = vmxml.Element('devices')
        self.dom.appendChild(self._devices)
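
Note: a hypothetical minimal conf accepted by this constructor (a sketch; real
vdsm confs carry many more keys, and _getMaxVCpus()/_getSmp() read further
entries not shown here):

conf = {
    'vmName': 'testvm',
    'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
    'memSize': '256',        # MiB; multiplied by 1024 into KiB above
    'kvmEnable': 'true',     # 'false' would select domain type 'qemu'
}
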
Code example #5
File: backup.py  Project: kobihk/vdsm
def create_checkpoint_xml(backup_cfg, drives):
    if backup_cfg.to_checkpoint_id is None:
        return None

    # create the checkpoint XML for a backup
    checkpoint = vmxml.Element('domaincheckpoint')

    name = vmxml.Element('name')
    name.appendTextNode(backup_cfg.to_checkpoint_id)
    checkpoint.appendChild(name)

    cp_description = "checkpoint for backup '{}'".format(backup_cfg.backup_id)
    description = vmxml.Element('description')
    description.appendTextNode(cp_description)
    checkpoint.appendChild(description)

    if backup_cfg.parent_checkpoint_id is not None:
        cp_parent = vmxml.Element('parent')
        parent_name = vmxml.Element('name')
        parent_name.appendTextNode(backup_cfg.parent_checkpoint_id)
        cp_parent.appendChild(parent_name)
        checkpoint.appendChild(cp_parent)

    disks = vmxml.Element('disks')
    for disk in backup_cfg.disks:
        if disk.checkpoint:
            drive = drives[disk.img_id]
            disk_elm = vmxml.Element('disk',
                                     name=drive.name,
                                     checkpoint='bitmap')
            disks.appendChild(disk_elm)

    checkpoint.appendChild(disks)

    return xmlutils.tostring(checkpoint)
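
Note: for a hypothetical backup_cfg with backup_id 'b1', to_checkpoint_id 'cp2',
parent_checkpoint_id 'cp1', and a single checkpointed disk whose drive is named
'sda', the serialized result would look like:

<domaincheckpoint>
    <name>cp2</name>
    <description>checkpoint for backup 'b1'</description>
    <parent>
        <name>cp1</name>
    </parent>
    <disks>
        <disk name='sda' checkpoint='bitmap'/>
    </disks>
</domaincheckpoint>
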
Code example #6
    def appendOs(self, use_serial_console=False):
        """
        Add <os> element to domain:

        <os>
            <type arch="x86_64" machine="pc">hvm</type>
            <boot dev="cdrom"/>
            <kernel>/tmp/vmlinuz-2.6.18</kernel>
            <initrd>/tmp/initrd-2.6.18.img</initrd>
            <cmdline>ARGs 1</cmdline>
            <smbios mode="sysinfo"/>
        </os>

        If 'use_serial_console' is true and we are on x86, use the console:

        <os>
            ...
            <bios useserial="yes"/>
        </os>

        """

        oselem = vmxml.Element('os')
        self.dom.appendChild(oselem)

        machine = self.conf.get('emulatedMachine',
                                _DEFAULT_MACHINES[self.arch])

        oselem.appendChildWithArgs('type',
                                   text='hvm',
                                   arch=self.arch,
                                   machine=machine)

        qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
        for c in self.conf.get('boot', ''):
            oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])

        if self.conf.get('initrd'):
            oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])

        if self.conf.get('kernel'):
            oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])

        if self.conf.get('kernelArgs'):
            oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])

        if cpuarch.is_x86(self.arch):
            oselem.appendChildWithArgs('smbios', mode='sysinfo')

        if conv.tobool(self.conf.get('bootMenuEnable', False)):
            oselem.appendChildWithArgs('bootmenu',
                                       enable='yes',
                                       timeout=str(_BOOT_MENU_TIMEOUT))

        if use_serial_console and cpuarch.is_x86(self.arch):
            oselem.appendChildWithArgs('bios', useserial='yes')
Code example #7
 def getReplicaXML(self):
     disk = vmxml.Element(
         "disk",
         device=self.diskReplicate["device"],
         snapshot="no",
         type=self.diskReplicate["diskType"],
     )
     disk.appendChild(_getSourceXML(self.diskReplicate))
     disk.appendChild(_getDriverXML(self.diskReplicate))
     return disk
Code example #8
 def _appendAgentDevice(self, path, name):
     """
       <channel type='unix'>
          <target type='virtio' name='org.linux-kvm.port.0'/>
          <source mode='bind' path='/tmp/socket'/>
       </channel>
     """
     channel = vmxml.Element('channel', type='unix')
     channel.appendChildWithArgs('target', type='virtio', name=name)
     channel.appendChildWithArgs('source', mode='bind', path=path)
     self._devices.appendChild(channel)
Code example #9
File: backup.py  Project: kobihk/vdsm
def create_backup_xml(address, drives, scratch_disks, from_checkpoint_id=None):
    domainbackup = vmxml.Element('domainbackup', mode='pull')

    if from_checkpoint_id is not None:
        incremental = vmxml.Element('incremental')
        incremental.appendTextNode(from_checkpoint_id)
        domainbackup.appendChild(incremental)

    server = vmxml.Element('server',
                           transport=address.transport,
                           socket=address.path)

    domainbackup.appendChild(server)

    disks = vmxml.Element('disks')

    # fill the backup XML disks
    for drive in drives.values():
        disk = vmxml.Element('disk', name=drive.name, type='file')
        # The scratch element can have a dev=/path/to/block/disk or a
        # file=/path/to/file/disk attribute, depending on the disk type.
        # Currently, all the scratch disks reside on the host's local
        # file storage.
        scratch = vmxml.Element('scratch', file=scratch_disks[drive.name])

        storage.disable_dynamic_ownership(scratch, write_type=False)
        disk.appendChild(scratch)

        disks.appendChild(disk)

    domainbackup.appendChild(disks)

    return xmlutils.tostring(domainbackup)
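
Note: a sketch of the output for one drive named 'sda' backed up incrementally
from a hypothetical checkpoint 'cp1' over a unix socket (all names and paths
hypothetical; the seclabel child added to <scratch> by disable_dynamic_ownership
is omitted):

<domainbackup mode='pull'>
    <incremental>cp1</incremental>
    <server transport='unix' socket='/run/vdsm/backup.sock'/>
    <disks>
        <disk name='sda' type='file'>
            <scratch file='/scratch/sda.qcow2'/>
        </disk>
    </disks>
</domainbackup>
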
Code example #10
File: libvirtxml.py  Project: olegtikhonov/vdsm
    def appendHostdevNumaTune(self, devices):
        """
        Automatically generate numatune for a VM with host devices. This
        tuning should prefer the NUMA nodes where the devices' MMIO regions
        reside.
        """

        numatune = vmxml.Element('numatune')
        numa_map = [dev_object.numa_node for dev_object in devices if
                    dev_object.is_hostdevice and dev_object.numa_node]
        if len(set(numa_map)) == 1:
            numatune.appendChildWithArgs('memory', mode='preferred',
                                         nodeset=numa_map[0])
            self.dom.appendChild(numatune)
Code example #11
def io_tune_list_to_dom(tunables):
    """
    Convert a list of VmDiskDeviceTuneLimits dictionaries
    to an XML representation.

    :param tunables: List of VmDiskDeviceTuneLimits dictionaries
    :return: DOM XML element containing all device nodes
    """
    io_tune = vmxml.Element("ioTune")

    for tune in tunables:
        device = io_tune_to_dom(tune)
        vmxml.append_child(io_tune, device)

    return io_tune
Code example #12
def _getSourceXML(drive):
    source = vmxml.Element('source')
    if drive["diskType"] == DISK_TYPE.BLOCK:
        source.setAttrs(dev=drive["path"])
    elif drive["diskType"] == DISK_TYPE.NETWORK:
        source.setAttrs(protocol=drive["protocol"], name=drive["path"])
        for host in drive["hosts"]:
            source.appendChildWithArgs('host', **host)
    elif drive["diskType"] == DISK_TYPE.FILE:
        source.setAttrs(file=drive["path"])
        if drive["device"] == 'cdrom' or drive["device"] == 'floppy':
            source.setAttrs(startupPolicy='optional')
    else:
        raise RuntimeError("Unsupported diskType %r" % drive["diskType"])
    return source
Code example #13
 def get_bandwidth_xml(specParams, oldBandwidth=None):
     """Returns a valid libvirt xml dom element object."""
     bandwidth = vmxml.Element('bandwidth')
     old = {} if oldBandwidth is None else dict(
         (vmxml.tag(elem), elem) for elem in vmxml.children(oldBandwidth))
     for key in ('inbound', 'outbound'):
         elem = specParams.get(key)
         if elem is None:  # Use the old setting if present
             if key in old:
                 bandwidth.appendChild(etree_element=old[key])
         elif elem:
             # Convert the values to string for adding them to the XML def
             attrs = dict((key, str(value)) for key, value in elem.items())
             bandwidth.appendChildWithArgs(key, **attrs)
     return bandwidth
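
Note: assuming hypothetical specParams of {'inbound': {'average': 1000,
'peak': 5000}, 'outbound': {'average': 128}}, the element returned above would
serialize as:

<bandwidth>
    <inbound average='1000' peak='5000'/>
    <outbound average='128'/>
</bandwidth>
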
Code example #14
def create_backup_xml(address, backup_disks, from_checkpoint_id=None):
    domainbackup = vmxml.Element('domainbackup', mode='pull')

    if from_checkpoint_id is not None:
        incremental = vmxml.Element('incremental')
        incremental.appendTextNode(from_checkpoint_id)
        domainbackup.appendChild(incremental)

    server = vmxml.Element('server',
                           transport=address.transport,
                           socket=address.path)

    domainbackup.appendChild(server)

    disks = vmxml.Element('disks')

    # fill the backup XML disks
    for backup_disk in backup_disks.values():
        disk = vmxml.Element('disk',
                             name=backup_disk.drive.name,
                             type=backup_disk.scratch_disk.type)

        # If the backup mode is reported by the engine, it should be
        # added to the backup XML.
        if backup_disk.backup_mode is not None:
            vmxml.set_attr(disk, "backupmode", backup_disk.backup_mode)

            if backup_disk.backup_mode == MODE_INCREMENTAL:
                # If backupmode is 'incremental', we should also provide
                # the checkpoint ID the incremental backup starts from.
                vmxml.set_attr(disk, MODE_INCREMENTAL, from_checkpoint_id)

        # The scratch element can have a dev=/path/to/block/disk or a
        # file=/path/to/file/disk attribute, depending on the disk type.
        if backup_disk.scratch_disk.type == DISK_TYPE.BLOCK:
            scratch = vmxml.Element('scratch',
                                    dev=backup_disk.scratch_disk.path)
        else:
            scratch = vmxml.Element('scratch',
                                    file=backup_disk.scratch_disk.path)

        storage.disable_dynamic_ownership(scratch, write_type=False)
        disk.appendChild(scratch)

        disks.appendChild(disk)

    domainbackup.appendChild(disks)

    return xmlutils.tostring(domainbackup)
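
Note: a sketch of this version's output for one block-backed disk in
incremental mode (assuming MODE_INCREMENTAL == 'incremental' and
DISK_TYPE.BLOCK == 'block'; all names and paths hypothetical, seclabel
children omitted):

<domainbackup mode='pull'>
    <incremental>cp1</incremental>
    <server transport='unix' socket='/run/vdsm/backup.sock'/>
    <disks>
        <disk name='sda' type='block' backupmode='incremental'
              incremental='cp1'>
            <scratch dev='/dev/vgname/scratch_lv'/>
        </disk>
    </disks>
</domainbackup>
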
Code example #15
def io_tune_values_to_dom(values, dom):
    """
    Create a DOM representation of the passed iotune values and
    attach it to the dom object in the form of nodes.

    :param values: Dictionary mapping iotune key to its value
    :param dom: XML DOM object to attach the result to
    """
    ops = ("total", "read", "write")
    units = ("bytes", "iops")

    for op, unit in itertools.product(ops, units):
        name = op + "_" + unit + "_sec"
        if name in values and values[name] >= 0:
            el = vmxml.Element(name)
            el.appendTextNode(str(values[name]))
            vmxml.append_child(dom, el)
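
Note: a standalone, runnable re-implementation sketch of the loop above using
the standard library's ElementTree, with a hypothetical values dict, to show
the element names it generates:

import itertools
import xml.etree.ElementTree as ET

values = {'total_bytes_sec': 1000, 'read_iops_sec': 100}  # hypothetical
dom = ET.Element('iotune')
for op, unit in itertools.product(("total", "read", "write"),
                                  ("bytes", "iops")):
    name = op + "_" + unit + "_sec"
    if name in values and values[name] >= 0:
        el = ET.SubElement(dom, name)
        el.text = str(values[name])
print(ET.tostring(dom).decode())
# <iotune><total_bytes_sec>1000</total_bytes_sec>
#         <read_iops_sec>100</read_iops_sec></iotune>
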
Code example #16
    def getXML(self):
        """
        Return xml element.

        <lease>
            <key>12523e3d-ad22-410c-8977-d2a7bf458a65</key>
            <lockspace>c2a6d7c8-8d81-4e01-9ed4-7eb670713448</lockspace>
            <target offset="1048576"
                    path="/dev/c2a6d7c8-8d81-4e01-9ed4-7eb670713448/leases"/>
        </lease>

        :rtype: `vmxml.Element`
        """
        lease = vmxml.Element('lease')
        lease.appendChildWithArgs('key', text=self.lease_id)
        lease.appendChildWithArgs('lockspace', text=self.sd_id)
        lease.appendChildWithArgs('target', path=self.path,
                                  offset=str(self.offset))
        return lease
Code example #17
File: libvirtxml.py  Project: olegtikhonov/vdsm
    def appendFeatures(self):
        """
        Add machine features to domain xml.

        Currently only
        <features>
            <acpi/>
        </features>

        for hyperv:
        <features>
            <acpi/>
            <hyperv>
                <relaxed state='on'/>
            </hyperv>
        </features>
        """

        if (conv.tobool(self.conf.get('acpiEnable', 'true')) or
                conv.tobool(self.conf.get('hypervEnable', 'false'))):
            features = self.dom.appendChildWithArgs('features')

        if conv.tobool(self.conf.get('acpiEnable', 'true')):
            features.appendChildWithArgs('acpi')

        if conv.tobool(self.conf.get('hypervEnable', 'false')):
            hyperv = vmxml.Element('hyperv')
            features.appendChild(hyperv)

            hyperv.appendChildWithArgs('relaxed', state='on')
            # turns off an internal Windows watchdog, and by doing so avoids
            # some high load BSODs.
            hyperv.appendChildWithArgs('vapic', state='on')
            # magic number taken from recommendations. References:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1083529#c10
            # https://bugzilla.redhat.com/show_bug.cgi?id=1053846#c0
            hyperv.appendChildWithArgs(
                'spinlocks', state='on', retries='8191')
Code example #18
def create_checkpoint_xml(backup_cfg, drives):
    if backup_cfg.to_checkpoint_id is None:
        return None

    # create the checkpoint XML for a backup
    checkpoint = vmxml.Element('domaincheckpoint')

    name = vmxml.Element('name')
    name.appendTextNode(backup_cfg.to_checkpoint_id)
    checkpoint.appendChild(name)

    cp_description = "checkpoint for backup '{}'".format(backup_cfg.backup_id)
    description = vmxml.Element('description')
    description.appendTextNode(cp_description)
    checkpoint.appendChild(description)

    if backup_cfg.creation_time:
        creation_time = vmxml.Element('creationTime')
        creation_time.appendTextNode(str(backup_cfg.creation_time))
        checkpoint.appendChild(creation_time)

    # When the XML is created for redefining a checkpoint,
    # the checkpoint may not contain any disks at all, e.g. when
    # old disks were removed/detached from the VM.
    # In that case, we should not add the <disks> element.
    if backup_cfg.disks:
        disks = vmxml.Element('disks')
        for disk in backup_cfg.disks:
            if disk.checkpoint:
                drive = drives[disk.img_id]
                disk_elm = vmxml.Element('disk',
                                         name=drive.name,
                                         checkpoint='bitmap',
                                         bitmap=backup_cfg.to_checkpoint_id)
                disks.appendChild(disk_elm)

        checkpoint.appendChild(disks)

    return xmlutils.tostring(checkpoint)
Code example #19
File: graphics.py  Project: xin49/vdsm
 def getSpiceVmcChannelsXML(self):
     vmc = vmxml.Element('channel', type='spicevmc')
     vmc.appendChildWithArgs('target',
                             type='virtio',
                             name='com.redhat.spice.0')
     return vmc
Code example #20
    def appendEmulator(self):
        emulatorPath = '/usr/bin/qemu-system-' + self.arch

        emulator = vmxml.Element('emulator', text=emulatorPath)

        self._devices.appendChild(emulator)
Code example #21
    def appendCpu(self, hugepages_shared=False):
        """
        Add guest CPU definition.

        <cpu match="exact">
            <model>qemu64</model>
            <topology sockets="S" cores="C" threads="T"/>
            <feature policy="require" name="sse2"/>
            <feature policy="disable" name="svm"/>
        </cpu>

        For POWER8, there is no point in trying to use a baseline CPU for
        flags since there are only HW features. There are two ways of
        creating a valid POWER8 element, of which we support the first:

            <cpu>
                <model>POWER{X}</model>
            </cpu>

        This translates to -cpu POWER{X} (where {X} is the processor
        version, 7 or 8), which tells qemu to emulate the CPU in the
        POWER8 family that it's capable of emulating - in case of hardware
        virtualization, that will be the host cpu (so an equivalent of
        -cpu host). Using this option does not limit migration between POWER8
        machines - it is still possible to migrate from e.g. POWER8 to
        POWER8e. The second option is not supported and serves only for
        reference:

            <cpu mode="host-model">
                <model>power{X}</model>
            </cpu>

        where {X} is the binary compatibility version of POWER that we
        require (6, 7, 8). This translates to qemu's -cpu host,compat=power{X}.

        Using the second option also does not limit migration between POWER8
        machines - it is still possible to migrate from e.g. POWER8 to POWER8e.
        """

        cpu = vmxml.Element('cpu')

        if cpuarch.is_x86(self.arch):
            cpu.setAttrs(match='exact')

            features = self.conf.get('cpuType', 'qemu64').split(',')
            model = features[0]

            if model == 'hostPassthrough':
                cpu.setAttrs(mode='host-passthrough')
            elif model == 'hostModel':
                cpu.setAttrs(mode='host-model')
            else:
                cpu.appendChildWithArgs('model', text=model)

                # This hack is for backward compatibility, as libvirt
                # does not allow a 'qemu64' guest on Intel hardware
                if model == 'qemu64' and '+svm' not in features:
                    features += ['-svm']

                for feature in features[1:]:
                    # convert Linux name of feature to libvirt
                    if feature[1:6] == 'sse4_':
                        feature = feature[0] + 'sse4.' + feature[6:]

                    featureAttrs = {'name': feature[1:]}
                    if feature[0] == '+':
                        featureAttrs['policy'] = 'require'
                    elif feature[0] == '-':
                        featureAttrs['policy'] = 'disable'
                    cpu.appendChildWithArgs('feature', **featureAttrs)
        elif cpuarch.is_ppc(self.arch):
            features = self.conf.get('cpuType', 'POWER8').split(',')
            model = features[0]
            cpu.appendChildWithArgs('model', text=model)

        if ('smpCoresPerSocket' in self.conf
                or 'smpThreadsPerCore' in self.conf):
            maxVCpus = int(self._getMaxVCpus())
            cores = int(self.conf.get('smpCoresPerSocket', '1'))
            threads = int(self.conf.get('smpThreadsPerCore', '1'))
            cpu.appendChildWithArgs('topology',
                                    sockets=str(maxVCpus // cores // threads),
                                    cores=str(cores),
                                    threads=str(threads))

        # CPU-pinning support
        # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
        if 'cpuPinning' in self.conf:
            cputune = vmxml.Element('cputune')
            cpuPinning = self.conf.get('cpuPinning')
            for cpuPin in sorted(cpuPinning.keys()):
                cputune.appendChildWithArgs('vcpupin',
                                            vcpu=cpuPin,
                                            cpuset=cpuPinning[cpuPin])
            self.dom.appendChild(cputune)

        # Guest numa topology support
        # see http://www.ovirt.org/Features/NUMA_and_Virtual_NUMA
        if 'guestNumaNodes' in self.conf:
            numa = vmxml.Element('numa')
            guestNumaNodes = sorted(self.conf.get('guestNumaNodes'),
                                    key=itemgetter('nodeIndex'))
            for vmCell in guestNumaNodes:
                nodeMem = int(vmCell['memory']) * 1024
                numa_args = {'cpus': vmCell['cpus'], 'memory': str(nodeMem)}
                if hugepages_shared:
                    numa_args.update({'memAccess': 'shared'})
                numa.appendChildWithArgs('cell', **numa_args)
            cpu.appendChild(numa)

        self.dom.appendChild(cpu)
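
Note: the topology arithmetic above in standalone form, with hypothetical
values; sockets is derived from the configured maximum vCPU count:

maxVCpus = 16    # from _getMaxVCpus()
cores = 4        # smpCoresPerSocket
threads = 2      # smpThreadsPerCore
sockets = maxVCpus // cores // threads
assert sockets == 2
# -> <topology sockets="2" cores="4" threads="2"/>
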
Code example #22
File: snapshot.py  Project: None-later/vdsm
        def memory_snapshot(memory_volume_path):
            """Libvirt snapshot XML"""

            return vmxml.Element('memory',
                                 snapshot='external',
                                 file=memory_volume_path)
Code example #23
 def _getTargetXML(self):
     target = vmxml.Element('target', dev=self.name)
     if self.iface:
         target.setAttrs(bus=self.iface)
     return target
Code example #24
 def __init__(self, conf, log, arch):
     self.conf = conf
     self.log = log
     self.arch = arch
     self.dom = vmxml.Element('domain', type='kvm')
Code example #25
 def test_append_child_too_many_args(self):
     empty = vmxml.find_first(self._dom, 'empty')
     self.assertRaises(RuntimeError, vmxml.append_child, empty,
                       vmxml.Element('new'),
                       xmlutils.fromstring('<new/>'))
Code example #26
 def test_append_child(self):
     empty = vmxml.find_first(self._dom, 'empty')
     vmxml.append_child(empty, vmxml.Element('new'))
     self.assertIsNotNone(vmxml.find_first(self._dom, 'new', None))
     empty = vmxml.find_first(self._dom, 'empty')
     self.assertIsNotNone(vmxml.find_first(empty, 'new', None))
Code example #27
File: graphics.py  Project: xin49/vdsm
    def getXML(self):
        """
        Create domxml for a graphics framebuffer.

        <graphics type='spice' port='5900' tlsPort='5901' autoport='yes'
                  listen='0' keymap='en-us'
                  passwdValidTo='1970-01-01T00:00:01'>
          <listen type='address' address='0'/>
          <clipboard copypaste='no'/>
        </graphics>
        OR
        <graphics type='vnc' port='5900' autoport='yes' listen='0'
                  keymap='en-us' passwdValidTo='1970-01-01T00:00:01'>
          <listen type='address' address='0'/>
        </graphics>

        """

        graphicsAttrs = {
            'type': self.device,
            'port': self.port,
            'autoport': 'yes',
        }
        if config.getboolean('vars', 'ssl'):
            graphicsAttrs['defaultMode'] = 'secure'
        # The default, 'any', falls back automatically to insecure
        # mode, so it works with ssl off.

        if self.device == 'spice':
            graphicsAttrs['tlsPort'] = self.tlsPort

        self._setPasswd(graphicsAttrs)

        if 'keyMap' in self.specParams:
            graphicsAttrs['keymap'] = self.specParams['keyMap']

        graphics = vmxml.Element('graphics', **graphicsAttrs)

        if not conv.tobool(self.specParams.get('copyPasteEnable', True)):
            clipboard = vmxml.Element('clipboard', copypaste='no')
            graphics.appendChild(clipboard)

        if not conv.tobool(self.specParams.get('fileTransferEnable', True)):
            filetransfer = vmxml.Element('filetransfer', enable='no')
            graphics.appendChild(filetransfer)

        # This list could be dropped in 4.1. We should keep only
        # the default mode, which is both simpler and safer.
        if (self.device == 'spice'
                and 'spiceSecureChannels' in self.specParams):
            for chan in self._getSpiceChannels():
                graphics.appendChildWithArgs('channel',
                                             name=chan,
                                             mode='secure')

        display_network = self.specParams.get('displayNetwork')
        if display_network:
            graphics.appendChildWithArgs(
                'listen',
                type='network',
                network=libvirtnetwork.netname_o2l(display_network))
        else:
            graphics.setAttrs(listen='0')

        return graphics
Code example #28
File: snapshot.py  Project: None-later/vdsm
    def snapshot(self):
        """Live snapshot command"""
        def norm_snap_drive_params(drive):
            """Normalize snapshot parameters"""

            if "baseVolumeID" in drive:
                base_drv = {
                    "device": "disk",
                    "domainID": drive["domainID"],
                    "imageID": drive["imageID"],
                    "volumeID": drive["baseVolumeID"]
                }
                target_drv = base_drv.copy()
                target_drv["volumeID"] = drive["volumeID"]

            elif "baseGUID" in drive:
                base_drv = {"GUID": drive["baseGUID"]}
                target_drv = {"GUID": drive["GUID"]}

            elif "baseUUID" in drive:
                base_drv = {"UUID": drive["baseUUID"]}
                target_drv = {"UUID": drive["UUID"]}

            else:
                base_drv, target_drv = (None, None)

            return base_drv, target_drv

        def rollback_drives(new_drives):
            """Rollback the prepared volumes for the snapshot"""

            for vm_dev_name, drive in new_drives.items():
                try:
                    self.vm.cif.teardownVolumePath(drive)
                except Exception:
                    self.vm.log.exception("Unable to teardown drive: %s",
                                          vm_dev_name)

        def memory_snapshot(memory_volume_path):
            """Libvirt snapshot XML"""

            return vmxml.Element('memory',
                                 snapshot='external',
                                 file=memory_volume_path)

        def vm_conf_for_memory_snapshot():
            """Returns the needed vm configuration with the memory snapshot"""

            return {
                'restoreFromSnapshot': True,
                '_srcDomXML': self.vm.migratable_domain_xml(),
                'elapsedTimeOffset': time.time() - self.vm.start_time
            }

        snap = vmxml.Element('domainsnapshot')
        disks = vmxml.Element('disks')
        new_drives = {}
        vm_drives = {}

        for drive in self.snap_drives:
            base_drv, tget_drv = norm_snap_drive_params(drive)

            try:
                self.vm.findDriveByUUIDs(tget_drv)
            except LookupError:
                # The VM is not using the requested volume for the
                # snapshot yet; continue.
                pass
            else:
                # The snapshot volume is the current one, skipping
                self.vm.log.debug("The volume is already in use: %s", tget_drv)
                continue  # Next drive

            try:
                vm_drive = self.vm.findDriveByUUIDs(base_drv)
            except LookupError:
                # The volume we want to snapshot doesn't exist
                self.vm.log.error("The base volume doesn't exist: %s",
                                  base_drv)
                return response.error('snapshotErr')

            if vm_drive.hasVolumeLeases:
                self.vm.log.error('disk %s has volume leases', vm_drive.name)
                return response.error('noimpl')

            if vm_drive.transientDisk:
                self.vm.log.error('disk %s is a transient disk', vm_drive.name)
                return response.error('transientErr')

            vm_dev_name = vm_drive.name

            new_drives[vm_dev_name] = tget_drv.copy()
            new_drives[vm_dev_name]["type"] = "disk"
            new_drives[vm_dev_name]["diskType"] = vm_drive.diskType
            new_drives[vm_dev_name]["poolID"] = vm_drive.poolID
            new_drives[vm_dev_name]["name"] = vm_dev_name
            new_drives[vm_dev_name]["format"] = "cow"

            # We need to keep track of the drive object because it keeps
            # the original data and is used to generate the snapshot element.
            # We keep the old volume ID so we can clear the block threshold.
            vm_drives[vm_dev_name] = (vm_drive, base_drv["volumeID"])

        prepared_drives = {}

        for vm_dev_name, vm_device in new_drives.items():
            # Add the device before requesting to prepare it, as we want
            # to be sure to tear it down even when prepareVolumePath
            # fails for some unknown issue that leaves the volume active.
            prepared_drives[vm_dev_name] = vm_device
            try:
                new_drives[vm_dev_name]["path"] = \
                    self.vm.cif.prepareVolumePath(new_drives[vm_dev_name])
            except Exception:
                self.vm.log.exception(
                    'unable to prepare the volume path for '
                    'disk %s', vm_dev_name)
                rollback_drives(prepared_drives)
                return response.error('snapshotErr')

            drive, _ = vm_drives[vm_dev_name]
            snapelem = drive.get_snapshot_xml(vm_device)
            disks.appendChild(snapelem)

        snap.appendChild(disks)

        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
                      | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)

        if self.memory_params:
            # Save the needed vm configuration
            # TODO: this, as other places that use pickle.dump
            # directly to files, should be done with outOfProcess
            vm_conf_vol = self.memory_params['dstparams']
            vm_conf_vol_path = self.vm.cif.prepareVolumePath(vm_conf_vol)
            try:
                with open(vm_conf_vol_path, "rb+") as f:
                    vm_conf = vm_conf_for_memory_snapshot()
                    # protocol=2 is needed for clusters < 4.4
                    # (for Python 2 host compatibility)
                    data = pickle.dumps(vm_conf, protocol=2)

                    # Ensure that the volume is aligned; qemu-img may segfault
                    # when converting unaligned images.
                    # https://bugzilla.redhat.com/1649788
                    aligned_length = utils.round(len(data), 4096)
                    data = data.ljust(aligned_length, b"\0")

                    f.write(data)
                    f.flush()
                    os.fsync(f.fileno())
            finally:
                self.vm.cif.teardownVolumePath(vm_conf_vol)

            # Adding the memory volume to the snapshot xml
            memory_vol = self.memory_params['dst']
            memory_vol_path = self.vm.cif.prepareVolumePath(memory_vol)
            snap.appendChild(memory_snapshot(memory_vol_path))
        else:
            memory_vol = memory_vol_path = None
            snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY

        snapxml = xmlutils.tostring(snap)
        # TODO: this is debug information. For 3.6.x we still need to
        # see the XML even with 'info' as default level.
        self.vm.log.info("%s", snapxml)

        self._snapshot_job['memoryVolPath'] = memory_vol_path
        self._snapshot_job['memoryVol'] = memory_vol
        self._snapshot_job['newDrives'] = new_drives
        vm_drives_serialized = {}
        for k, v in vm_drives.items():
            vm_drives_serialized[k] = [xmlutils.tostring(v[0].getXML()), v[1]]
        self._snapshot_job['vmDrives'] = vm_drives_serialized
        self.vm.update_snapshot_metadata(self._snapshot_job)

        # We need to stop the drive monitoring for two reasons: one is to
        # prevent spurious libvirt errors about missing drive paths (since
        # we're changing them), and the other is to avoid triggering a drive
        # extension for the new volume with the apparent size of the old one
        # (the apparentsize is updated as the last step in
        # updateDriveParameters).
        self.vm.drive_monitor.disable()

        try:
            if self.should_freeze:
                self.vm.freeze()
            try:
                self.vm.log.info(
                    "Taking a live snapshot (drives=%s,"
                    "memory=%s)",
                    ', '.join(drive["name"] for drive in new_drives.values()),
                    self.memory_params is not None)
                self.vm.run_dom_snapshot(snapxml, snap_flags)
                self.vm.log.info("Completed live snapshot")
            except libvirt.libvirtError:
                self.vm.log.exception("Unable to take snapshot")
                if self.should_freeze:
                    self.vm.thaw()
                return response.error('snapshotErr')
        except:
            # In case the VM was shutdown in the middle of the snapshot
            # operation we keep doing the finalizing and reporting the failure.
            self._finalize_vm(memory_vol)
            res = False
        else:
            res = self.teardown(memory_vol_path, memory_vol, new_drives,
                                vm_drives)
        if not res:
            raise RuntimeError("Failed to execute snapshot, "
                               "considering the operation as failure")
Code example #29
 def _getIotuneXML(self):
     iotune = vmxml.Element('iotune')
     for key, value in sorted(self.iotune.items()):
         iotune.appendChildWithArgs(key, text=str(value))
     return iotune
Code example #30
 def _getAuthXML(self):
     auth = vmxml.Element("auth", username=self.auth["username"])
     auth.appendChildWithArgs("secret",
                              type=self.auth["type"],
                              uuid=self.auth["uuid"])
     return auth
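
Note: assuming a hypothetical self.auth of {'username': 'cinder',
'type': 'ceph', 'uuid': '9ffe28b6-6134-4b1e-8804-1185f49c436f'}, the element
returned above would serialize as:

<auth username='cinder'>
    <secret type='ceph' uuid='9ffe28b6-6134-4b1e-8804-1185f49c436f'/>
</auth>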