Example #1
0
    def get_base_config(self, instance, vif, image_meta, inst_type, virt_type):
        """Build the guest interface config for this VIF.

        Model, driver and queue count default to None so that libvirt /
        the hypervisor choose; an image-supplied network model or the
        ``use_virtio_for_bridges`` option can override the model.

        :raises: exception.UnsupportedHardware if the chosen model is not
            valid for this virt type
        """
        conf = vconfig.LibvirtConfigGuestInterface()

        # An explicit network model from the image always wins.
        model = (osinfo.HardwareProperties(image_meta).network_model
                 if image_meta else None)
        driver = None
        vhost_queues = None

        # Otherwise default KVM/QEMU guests to virtio when the global
        # config option asks for it.
        wants_virtio = (virt_type in ('kvm', 'qemu')
                        and CONF.libvirt.use_virtio_for_bridges)
        if model is None and wants_virtio:
            model = network_model.VIF_MODEL_VIRTIO

        # Workaround libvirt bug, where it mistakenly enables vhost
        # mode even for non-KVM guests.
        if virt_type == "qemu" and model == network_model.VIF_MODEL_VIRTIO:
            driver = "qemu"

        if not is_vif_model_valid_for_virt(virt_type, model):
            raise exception.UnsupportedHardware(model=model, virt=virt_type)

        # Multiqueue virtio settings only apply on KVM.
        if virt_type == 'kvm' and model == network_model.VIF_MODEL_VIRTIO:
            vhost_drv, vhost_queues = self._get_virtio_mq_settings(
                image_meta, inst_type)
            driver = vhost_drv or driver

        designer.set_vif_guest_frontend_config(conf, vif['address'], model,
                                               driver, vhost_queues)
        return conf
Example #2
0
    def get_config(self, instance, vif, image_meta, inst_type):
        """Build the guest interface config for this VIF.

        Model and driver default to None so that libvirt / the hypervisor
        choose; an image 'hw_vif_model' property or the
        ``use_virtio_for_bridges`` option can override the model.

        :raises: exception.UnsupportedHardware if the chosen model is not
            valid for the configured virt type
        """
        conf = vconfig.LibvirtConfigGuestInterface()
        model = None
        driver = None

        # An explicit 'hw_vif_model' image property takes precedence.
        if image_meta:
            model = image_meta.get('properties', {}).get('hw_vif_model')

        virt_type = CONF.libvirt.virt_type

        # Otherwise default KVM/QEMU guests to virtio when the global
        # config option asks for it.
        if (model is None and virt_type in ('kvm', 'qemu') and
                CONF.libvirt.use_virtio_for_bridges):
            model = "virtio"

        # Workaround libvirt bug, where it mistakenly enables vhost
        # mode even for non-KVM guests.
        if virt_type == "qemu" and model == "virtio":
            driver = "qemu"

        if not is_vif_model_valid_for_virt(virt_type, model):
            raise exception.UnsupportedHardware(model=model,
                                                virt=virt_type)

        designer.set_vif_guest_frontend_config(
            conf, vif['address'], model, driver)

        return conf
Example #3
0
def get_disk_bus_for_device_type(virt_type,
                                 image_meta,
                                 device_type="disk",
                                 instance=None):
    """Determine the best disk bus to use for a device type.

    Given the configured virtualization type, pick the optimal disk_bus
    for the requested device type: e.g. 'virtio' for a disk on KVM, and
    'ide' for a CDROM on x86_64 but 'scsi' on ppc64/s390.

    An image property ('hw_<device_type>_bus') overrides the hypervisor
    default, provided it is valid for the virt type.

    :returns: the disk_bus name, or None when the device type is not
        supported on this virtualization type
    :raises: exception.UnsupportedHardware for an invalid image-supplied
        bus; exception.UnsupportedVirtType for an unknown virt type
    """
    # Image-supplied bus takes precedence over any default.
    image_bus = image_meta.get('properties', {}).get(
        "hw_" + device_type + "_bus")
    if image_bus is not None:
        if is_disk_bus_valid_for_virt(virt_type, image_bus):
            return image_bus
        raise exception.UnsupportedHardware(model=image_bus,
                                            virt=virt_type)

    # Fall back to a per-hypervisor default bus.
    if virt_type == "lxc":
        return "lxc"
    if virt_type == "uml":
        return "uml" if device_type == "disk" else None
    if virt_type == "xen":
        guest_vm_mode = (vm_mode.get_from_instance(instance)
                         if instance else None)
        # HVM xen guests get emulated IDE; PV guests the native xen bus.
        return "ide" if guest_vm_mode == vm_mode.HVM else "xen"
    if virt_type in ("qemu", "kvm"):
        if device_type == "cdrom":
            guestarch = libvirt_utils.get_arch(image_meta)
            scsi_arches = (arch.PPC, arch.PPC64, arch.S390, arch.S390X)
            return "scsi" if guestarch in scsi_arches else "ide"
        if device_type == "disk":
            return "virtio"
        if device_type == "floppy":
            return "fdc"
        return None
    if virt_type == "parallels":
        if device_type == "cdrom":
            return "ide"
        if device_type == "disk":
            return "sata"
        return None

    # Any other virt type is unsupported.
    raise exception.UnsupportedVirtType(virt=virt_type)
Example #4
0
def get_disk_bus_for_device_type(virt_type,
                                 image_meta=None,
                                 device_type="disk"):
    """Determine the best disk bus to use for a device type.

    Given the configured virtualization type, pick the optimal disk_bus
    for the requested device type: e.g. 'virtio' for a disk on KVM and
    'ide' for a CDROM.

    An image property ('hw_<device_type>_bus') overrides the hypervisor
    default, provided it is valid for the virt type.

    :returns: the disk_bus name, or None when the device type is not
        supported on this virtualization type
    :raises: exception.UnsupportedHardware for an invalid image-supplied
        bus
    """
    # Image-supplied bus takes precedence over any default.
    if image_meta:
        bus = image_meta.get('properties', {}).get(
            "hw_" + device_type + "_bus")
        if bus is not None:
            if not is_disk_bus_valid_for_virt(virt_type, bus):
                raise exception.UnsupportedHardware(model=bus,
                                                    virt=virt_type)
            return bus

    # Per-hypervisor default buses; a missing entry means the device
    # type is unsupported (None).
    if virt_type == "lxc":
        return "lxc"
    defaults = {
        "uml": {"disk": "uml"},
        "xen": {"cdrom": "ide", "disk": "xen"},
        "qemu": {"cdrom": "ide", "disk": "virtio", "floppy": "fdc"},
        "kvm": {"cdrom": "ide", "disk": "virtio", "floppy": "fdc"},
    }
    return defaults.get(virt_type, {}).get(device_type)
Example #5
0
File: vif.py  Project: yyjinlong/nova
    def get_base_config(self, instance, vif, image_meta, inst_type, virt_type):
        """Build the guest interface config for this VIF.

        Model and driver default to None so that libvirt / the hypervisor
        choose; an image 'hw_vif_model' property or the
        ``use_virtio_for_bridges`` option can override the model.

        :raises: exception.UnsupportedHardware if the chosen model is not
            valid for this virt type
        """
        conf = vconfig.LibvirtConfigGuestInterface()
        model = None
        driver = None

        # NOTE(jinlong): For add the nic multi queue, the multi queue number
        # default equal vcpu number.
        conf.vcpus = instance.flavor.vcpus

        # An explicit 'hw_vif_model' image property takes precedence.
        if image_meta:
            model = image_meta.get('properties', {}).get('hw_vif_model')

        # Otherwise default KVM/QEMU guests to virtio when the global
        # config option asks for it.
        if (model is None and virt_type in ('kvm', 'qemu')
                and CONF.libvirt.use_virtio_for_bridges):
            model = network_model.VIF_MODEL_VIRTIO

        # Workaround libvirt bug, where it mistakenly enables vhost
        # mode even for non-KVM guests.
        if virt_type == "qemu" and model == network_model.VIF_MODEL_VIRTIO:
            driver = "qemu"

        if not is_vif_model_valid_for_virt(virt_type, model):
            raise exception.UnsupportedHardware(model=model, virt=virt_type)

        designer.set_vif_guest_frontend_config(conf, vif['address'], model,
                                               driver)
        return conf
Example #6
0
File: vif.py  Project: YLTiny/nova
    def get_base_config(self, instance, mac, image_meta, inst_type, virt_type,
                        vnic_type):
        # TODO(sahid): We should rewrite it. This method handles too
        # many unrelated things. We probably need to have a specific
        # virtio, vhost, vhostuser functions.

        conf = vconfig.LibvirtConfigGuestInterface()
        # Default to letting libvirt / the hypervisor choose the model
        model = None
        driver = None
        vhost_queues = None
        rx_queue_size = None

        # NOTE(stephenfin): Skip most things here as only apply to virtio
        # devices
        if vnic_type in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH:
            designer.set_vif_guest_frontend_config(conf, mac, model, driver,
                                                   vhost_queues, rx_queue_size)
            return conf

        rx_queue_size = CONF.libvirt.rx_queue_size

        # if model has already been defined,
        # image_meta contents will override it
        model = self.get_vif_model(image_meta=image_meta, vif_model=model)

        if not is_vif_model_valid_for_virt(virt_type, model):
            raise exception.UnsupportedHardware(model=model, virt=virt_type)

        # The rest of this only applies to virtio
        if model != network_model.VIF_MODEL_VIRTIO:
            designer.set_vif_guest_frontend_config(conf, mac, model, driver,
                                                   vhost_queues, rx_queue_size)
            return conf

        # Workaround libvirt bug, where it mistakenly enables vhost mode, even
        # for non-KVM guests
        if virt_type == 'qemu':
            driver = 'qemu'

        if virt_type in ('kvm', 'parallels'):
            vhost_drv, vhost_queues = self._get_virtio_mq_settings(
                image_meta, inst_type)
            # TODO(sahid): It seems that we return driver 'vhost' even
            # for vhostuser interface where for vhostuser interface
            # the driver should be 'vhost-user'. That currently does
            # not create any issue since QEMU ignores the driver
            # argument for vhostuser interface but we should probably
            # fix that anyway. Also we should enforce that the driver
            # use vhost and not None.
            driver = vhost_drv or driver

        if driver == 'vhost' or driver is None:
            # vhost backend only supports update of RX queue size
            if CONF.libvirt.rx_queue_size:
                # TODO(sahid): Specifically force driver to be vhost
                # that because if None we don't generate the XML
                # driver element needed to set the queue size
                # attribute. This can be removed when get_base_config
                # will be fixed and rewrite to set the correct
                # backend.
                driver = 'vhost'

        designer.set_vif_guest_frontend_config(conf, mac, model, driver,
                                               vhost_queues, rx_queue_size)

        return conf
Example #7
0
File: vif.py  Project: yirenjie/TestNova
    def get_base_config(self, instance, mac, image_meta,
                        inst_type, virt_type, vnic_type, host):
        """Build the guest interface frontend config for a VIF.

        Model, driver, queue count and RX queue size default to None so
        libvirt / the hypervisor choose; image metadata, the
        ``use_virtio_for_bridges`` option, the flavor multiqueue settings
        and the host virtio queue sizes can override them.

        :raises: exception.UnsupportedHardware if the chosen model is not
            valid for this virt type
        """
        # TODO(sahid): this still mixes virtio/vhost/vhostuser concerns
        # and should eventually be split into dedicated functions.
        conf = vconfig.LibvirtConfigGuestInterface()
        model = None
        driver = None
        vhost_queues = None
        rx_queue_size = None

        # An explicit network model from the image always wins.
        if image_meta:
            model = osinfo.HardwareProperties(image_meta).network_model

        # Note(moshele): passthrough vnic types don't support the virtio
        # model, so the virtio-by-default rule only applies to the rest.
        passthrough = vnic_type in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH
        if (not passthrough and model is None
                and virt_type in ('kvm', 'qemu', 'parallels')
                and CONF.libvirt.use_virtio_for_bridges):
            model = network_model.VIF_MODEL_VIRTIO

        virtio = model == network_model.VIF_MODEL_VIRTIO

        # Workaround libvirt bug, where it mistakenly enables vhost
        # mode even for non-KVM guests.
        if virtio and virt_type == "qemu":
            driver = "qemu"

        if not is_vif_model_valid_for_virt(virt_type, model):
            raise exception.UnsupportedHardware(model=model,
                                                virt=virt_type)

        if virtio and virt_type in ('kvm', 'parallels'):
            vhost_drv, vhost_queues = self._get_virtio_mq_settings(
                image_meta, inst_type)
            # TODO(sahid): we return driver 'vhost' even for vhostuser
            # interfaces, where it should arguably be 'vhost-user'. QEMU
            # ignores the driver argument for vhostuser so this causes no
            # issue today, but it should be fixed; we should also enforce
            # vhost rather than None.
            driver = vhost_drv or driver

        # Note(moshele): rx_queue_size is supported only for the virtio
        # model and the vhost backend.
        if virtio and driver in ('vhost', None):
            rx_queue_size, _ = self._get_virtio_queue_sizes(host)
            if rx_queue_size:
                # TODO(sahid): force driver to 'vhost' because with None
                # the XML driver element carrying the queue-size
                # attribute would not be generated. Remove once
                # get_base_config sets the correct backend.
                driver = 'vhost'

        designer.set_vif_guest_frontend_config(
            conf, mac, model, driver, vhost_queues, rx_queue_size)

        return conf
Example #8
0
def get_disk_bus_for_device_type(instance,
                                 virt_type,
                                 image_meta,
                                 device_type="disk"):
    """Determine the best disk bus to use for a device type.

    Given the configured virtualization type, pick the optimal disk_bus
    for the requested device type: e.g. 'virtio' for a disk on KVM; for
    a CDROM, 'ide' for the 'pc' machine type on x86_64, 'sata' for the
    'q35' machine type, and 'scsi' on ppc64/s390/aarch64.

    An image-supplied bus overrides the hypervisor default, provided it
    is valid for the virt type.

    :returns: the disk_bus name, or None when the device type is not
        supported on this virtualization type
    :raises: exception.UnsupportedHardware for an invalid image-supplied
        bus; exception.UnsupportedVirtType for an unknown virt type
    """
    # Image-supplied bus takes precedence over any default.
    if device_type == "disk":
        disk_bus = osinfo.HardwareProperties(image_meta).disk_model
    else:
        disk_bus = image_meta.properties.get("hw_" + device_type + "_bus")
    if disk_bus is not None:
        if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
            raise exception.UnsupportedHardware(model=disk_bus,
                                                virt=virt_type)
        return disk_bus

    # Fall back to a per-hypervisor default bus.
    if virt_type == "lxc":
        return "lxc"
    if virt_type == "uml":
        return "uml" if device_type == "disk" else None
    if virt_type == "xen":
        # HVM xen guests get emulated IDE; PV guests the native xen bus.
        vmmode = obj_fields.VMMode.get_from_instance(instance)
        return "ide" if vmmode == obj_fields.VMMode.HVM else "xen"
    if virt_type in ("qemu", "kvm"):
        if device_type == "disk":
            return "virtio"
        if device_type == "floppy":
            return "fdc"
        if device_type == "cdrom":
            guestarch = libvirt_utils.get_arch(image_meta)
            scsi_arches = (
                obj_fields.Architecture.PPC,
                obj_fields.Architecture.PPC64,
                obj_fields.Architecture.PPCLE,
                obj_fields.Architecture.PPC64LE,
                obj_fields.Architecture.S390,
                obj_fields.Architecture.S390X,
                obj_fields.Architecture.AARCH64,
            )
            if guestarch in scsi_arches:
                return "scsi"
            # NOTE(lyarwood): The Q35 machine type provides no IDE bus,
            # so cdroms must use SATA there. The match is loose because
            # QEMU versions the q35 machine type per release and
            # downstream distributions add their own variants.
            machine_type = libvirt_utils.get_machine_type(image_meta)
            if machine_type and 'q35' in machine_type:
                return "sata"
            return "ide"
        return None
    if virt_type == "parallels":
        if device_type == "cdrom":
            return "ide"
        if device_type == "disk":
            return "scsi"
        return None

    # Any other virt type is unsupported.
    raise exception.UnsupportedVirtType(virt=virt_type)