def __init__(self, conn=None):
    """
    Initialize device state.

    @param conn: libvirt connection to validate device against
    @type conn: virConnect
    @raises ValueError: if the subclass did not set a device type, the
        type is unknown, or conn is not a virConnect instance
    """
    # Subclasses must declare their device type before this runs.
    if not self._virtual_device_type:
        raise ValueError(_("Virtual device type must be set in subclass."))

    if self._virtual_device_type not in self.virtual_device_types:
        raise ValueError(_("Unknown virtual device type '%s'.") %
                         self._virtual_device_type)

    if conn:
        if not isinstance(conn, libvirt.virConnect):
            # Fixed: call-style raise (the old 'raise X, msg' statement
            # form is Python 2 only and inconsistent with the other
            # raises in this method).
            raise ValueError(_("'conn' must be a virConnect instance"))
    self._conn = conn

    # Whether the connection points at a remote host; affects validation
    # that would otherwise need local filesystem access.
    self.__remote = None
    if self.conn:
        self.__remote = _util.is_uri_remote(self.conn.getURI())

    # Host capabilities, parsed once per device when a conn is available.
    self._caps = None
    if self.conn:
        self._caps = CapabilitiesParser.parse(self.conn.getCapabilities())
def __init__(self, conn=None):
    """
    Initialize device state.

    @param conn: libvirt connection to validate device against
    @type conn: virConnect
    @raises ValueError: if the subclass did not set a device type, the
        type is unknown, or conn is not a virConnect instance
    """
    # Subclasses must declare their device type before this runs.
    if not self._virtual_device_type:
        raise ValueError(_("Virtual device type must be set in subclass."))

    if self._virtual_device_type not in self.virtual_device_types:
        raise ValueError(_("Unknown virtual device type '%s'.") %
                         self._virtual_device_type)

    if conn:
        if not isinstance(conn, libvirt.virConnect):
            # Fixed: call-style raise (the old 'raise X, msg' statement
            # form is Python 2 only and inconsistent with the other
            # raises in this method).
            raise ValueError(_("'conn' must be a virConnect instance"))
    self._conn = conn

    # Whether the connection points at a remote host; affects validation
    # that would otherwise need local filesystem access.
    self.__remote = None
    if self.conn:
        self.__remote = _util.is_uri_remote(self.conn.getURI())

    # Host capabilities, parsed once per device when a conn is available.
    self._caps = None
    if self.conn:
        self._caps = CapabilitiesParser.parse(self.conn.getCapabilities())
def generate_cpuset(conn, mem):
    """
    Generate a cpu pinning string based on host NUMA configuration.

    If the host doesn't have a suitable NUMA configuration, a RuntimeError
    is raised.

    @param conn: libvirt connection to query capabilities from
    @param mem: requested VM memory (scaled by 1024 below to match the
        units of getCellsFreeMemory — presumably KiB in, bytes compared;
        TODO confirm against callers)
    @returns: comma-separated string of cpu ids in the chosen NUMA cell
    @raises RuntimeError: if the host is not NUMA capable or no cell fits
    """
    caps = CapabilitiesParser.parse(conn.getCapabilities())

    if caps.host.topology is None:
        raise RuntimeError(_("No topology section in capabilities xml."))

    cells = caps.host.topology.cells
    if len(cells) <= 1:
        raise RuntimeError(_("Capabilities only show <= 1 cell. "
                             "Not NUMA capable"))

    # Capabilities tells us about the available memory 'cells' on the
    # system. Each 'cell' has associated 'cpu's.
    #
    # Use getCellsFreeMemory to determine which 'cell' has the smallest
    # amount of memory which fits the requested VM memory amount, then
    # pin the VM to that 'cell's associated 'cpu's.
    cell_mem = conn.getCellsFreeMemory(0, len(cells))
    cell_id = -1
    mem = mem * 1024
    for i, cell in enumerate(cells):
        if cell_mem[i] < mem:
            # Cell doesn't have enough mem to fit, skip it
            continue

        if len(cell.cpus) == 0:
            # No cpus to use for the cell
            continue

        # Track the smallest cell that still fits the request
        if cell_id < 0 or cell_mem[i] < cell_mem[cell_id]:
            cell_id = i

    if cell_id < 0:
        raise RuntimeError(_("Could not find any usable NUMA "
                             "cell/cpu combinations."))

    # Build the cpuset string (join is linear; the old += loop was
    # quadratic in the number of cpus).
    return ",".join(str(cpu.id) for cpu in cells[cell_id].cpus)
def generate_cpuset(conn, mem):
    """
    Generate a cpu pinning string based on host NUMA configuration.

    If the host doesn't have a suitable NUMA configuration, a RuntimeError
    is raised.

    @param conn: libvirt connection to query capabilities from
    @param mem: requested VM memory (scaled by 1024 below to match the
        units of getCellsFreeMemory — presumably KiB in, bytes compared;
        TODO confirm against callers)
    @returns: comma-separated string of cpu ids in the chosen NUMA cell
    @raises RuntimeError: if the host is not NUMA capable or no cell fits
    """
    caps = CapabilitiesParser.parse(conn.getCapabilities())

    if caps.host.topology is None:
        raise RuntimeError(_("No topology section in capabilities xml."))

    cells = caps.host.topology.cells
    if len(cells) <= 1:
        raise RuntimeError(_("Capabilities only show <= 1 cell. "
                             "Not NUMA capable"))

    # Capabilities tells us about the available memory 'cells' on the
    # system. Each 'cell' has associated 'cpu's.
    #
    # Use getCellsFreeMemory to determine which 'cell' has the smallest
    # amount of memory which fits the requested VM memory amount, then
    # pin the VM to that 'cell's associated 'cpu's.
    cell_mem = conn.getCellsFreeMemory(0, len(cells))
    cell_id = -1
    mem = mem * 1024
    for i, cell in enumerate(cells):
        if cell_mem[i] < mem:
            # Cell doesn't have enough mem to fit, skip it
            continue

        if len(cell.cpus) == 0:
            # No cpus to use for the cell
            continue

        # Track the smallest cell that still fits the request
        if cell_id < 0 or cell_mem[i] < cell_mem[cell_id]:
            cell_id = i

    if cell_id < 0:
        raise RuntimeError(_("Could not find any usable NUMA "
                             "cell/cpu combinations."))

    # Build the cpuset string (join is linear; the old += loop was
    # quadratic in the number of cpus).
    return ",".join(str(cpu.id) for cpu in cells[cell_id].cpus)
def __init__(self, conn):
    """
    Initialize security label state from the host's security model.

    @param conn: libvirt connection to query capabilities from
    @raises ValueError: if the hypervisor reports no security driver
    """
    self.conn = conn
    self._caps = CapabilitiesParser.parse(conn.getCapabilities())

    # Default to dynamic labeling; callers may override afterwards.
    self._type = self.SECLABEL_TYPE_DYNAMIC
    self._model = None
    self._label = None
    self._imagelabel = None

    model = self._caps.host.secmodel.model
    if not model:
        # Fixed: the adjacent string literals lacked a separating space,
        # producing the message "...security driverenabled".
        raise ValueError("Hypervisor does not have any security driver "
                         "enabled")
    self.model = model
def pygrub_path(conn=None):
    """
    Return the pygrub path for the current host, or connection if available.
    """
    # FIXME: This should be removed/deprecated when capabilities are
    # fixed to provide bootloader info
    solaris_path = "/usr/lib/xen/bin/pygrub"
    default_path = "/usr/bin/pygrub"

    if conn:
        cap = CapabilitiesParser.parse(conn.getCapabilities())
        # Solaris hosts report an 'i86pc' architecture.
        return solaris_path if cap.host.arch == "i86pc" else default_path

    return solaris_path if platform.system() == "SunOS" else default_path
def __init__(self, image, capabilities=None, boot_index=None, conn=None):
    """
    Initialize an image-based installer.

    @param image: parsed image descriptor to install from
    @param capabilities: pre-parsed Cap.Capabilities instance, used only
        when no conn is supplied
    @param boot_index: index into the image's boot descriptor list, or
        None to auto-select the best match for this host
    @param conn: libvirt connection used to fetch host capabilities
    @raises ValueError: on bad parameters
    @raises ImageInstallerException: if no boot descriptor fits the host
    @raises PlatformMatchException: if the virtualization type is
        unsupported on this host
    """
    Installer.Installer.__init__(self, conn=conn)

    self._arch = None
    self._image = image

    # Set capabilities: prefer the live connection, fall back to an
    # explicitly passed Capabilities object.
    if self.conn:
        self._capabilities = Cap.parse(self.conn.getCapabilities())
    elif capabilities:
        if not isinstance(capabilities, Cap.Capabilities):
            raise ValueError(_("'capabilities' must be a "
                               "Capabilities instance."))
        self._capabilities = capabilities
    else:
        raise ValueError(_("'conn' or 'capabilities' must be specified."))

    # Set boot _boot_caps/_boot_parameters
    if boot_index is None:
        self._boot_caps = match_boots(self._capabilities,
                                      self.image.domain.boots)
        if self._boot_caps is None:
            raise ImageInstallerException(_("Could not find suitable boot "
                                            "descriptor for this host"))
    else:
        if (boot_index < 0 or
            (boot_index + 1) > len(image.domain.boots)):
            raise ValueError(_("boot_index out of range."))
        self._boot_caps = image.domain.boots[boot_index]

    # Set up internal caps.guest object
    self._guest = self._capabilities.guestForOSType(self.boot_caps.type,
                                                    self.boot_caps.arch)
    if self._guest is None:
        # Fixed: format *after* the gettext lookup. Formatting inside
        # _() passed an already-substituted string to the catalog, so it
        # could never match a translation entry.
        raise PlatformMatchException(_("Unsupported virtualization type: "
                                       "%s %s") % (self.boot_caps.type,
                                                   self.boot_caps.arch))

    self.os_type = self.boot_caps.type
    self._domain = self._guest.bestDomainType()
    self.type = self._domain.hypervisor_type
    self.arch = self._guest.arch
def __init__(self, image, capabilities=None, boot_index=None, conn=None):
    """
    Initialize an image-based installer.

    @param image: parsed image descriptor to install from
    @param capabilities: pre-parsed Cap.Capabilities instance, used only
        when no conn is supplied
    @param boot_index: index into the image's boot descriptor list, or
        None to auto-select the best match for this host
    @param conn: libvirt connection used to fetch host capabilities
    @raises ValueError: on bad parameters
    @raises ImageInstallerException: if no boot descriptor fits the host
    @raises PlatformMatchException: if the virtualization type is
        unsupported on this host
    """
    Installer.Installer.__init__(self, conn=conn)

    self._arch = None
    self._image = image

    # Set capabilities: prefer the live connection, fall back to an
    # explicitly passed Capabilities object.
    if self.conn:
        self._capabilities = Cap.parse(self.conn.getCapabilities())
    elif capabilities:
        if not isinstance(capabilities, Cap.Capabilities):
            raise ValueError(_("'capabilities' must be a "
                               "Capabilities instance."))
        self._capabilities = capabilities
    else:
        raise ValueError(_("'conn' or 'capabilities' must be specified."))

    # Set boot _boot_caps/_boot_parameters
    if boot_index is None:
        self._boot_caps = match_boots(self._capabilities,
                                      self.image.domain.boots)
        if self._boot_caps is None:
            raise ImageInstallerException(_("Could not find suitable boot "
                                            "descriptor for this host"))
    else:
        if (boot_index < 0 or
            (boot_index + 1) > len(image.domain.boots)):
            raise ValueError(_("boot_index out of range."))
        self._boot_caps = image.domain.boots[boot_index]

    # Set up internal caps.guest object
    self._guest = self._capabilities.guestForOSType(self.boot_caps.type,
                                                    self.boot_caps.arch)
    if self._guest is None:
        # Fixed: format *after* the gettext lookup. Formatting inside
        # _() passed an already-substituted string to the catalog, so it
        # could never match a translation entry.
        raise PlatformMatchException(_("Unsupported virtualization type: "
                                       "%s %s") % (self.boot_caps.type,
                                                   self.boot_caps.arch))

    self.os_type = self.boot_caps.type
    self._domain = self._guest.bestDomainType()
    self.type = self._domain.hypervisor_type
    self.arch = self._guest.arch
def _get_caps(self): if not self.__caps and self.conn: self.__caps = CapabilitiesParser.parse(self.conn.getCapabilities()) return self.__caps
def __init__(self, type=None, connection=None, hypervisorURI=None,
             installer=None):
    """
    Initialize guest state.

    @param type: ignored; superseded by installer.type, kept for
        backward compatibility with child classes
    @param connection: libvirt connection to use; one is opened from
        hypervisorURI when not supplied
    @param hypervisorURI: URI to open a connection to when no connection
        is passed
    @param installer: Installer instance driving the install process
    @raises RuntimeError: if no hypervisor connection can be established
    """
    # Set up the connection, since it is fundamental for other init
    self.conn = connection
    if self.conn is None:
        logging.debug("No conn passed to Guest, opening URI '%s'" %
                      hypervisorURI)
        self.conn = libvirt.open(hypervisorURI)

    if self.conn is None:
        # Fixed: call-style raise (the old 'raise X, msg' statement form
        # is Python 2 only) and identity comparison with None above.
        raise RuntimeError(_("Unable to connect to hypervisor, aborting "
                             "installation!"))

    # We specifically ignore the 'type' parameter here, since
    # it has been replaced by installer.type, and child classes can
    # use it when creating a default installer.
    ignore = type
    self._installer = installer

    # Basic domain configuration state
    self._name = None
    self._uuid = None
    self._memory = None
    self._maxmemory = None
    self._vcpus = 1
    self._cpuset = None
    self._graphics_dev = None
    self._autostart = False
    self._clock = Clock(self.conn)
    self._seclabel = None
    self._description = None
    self.features = None
    self._replace = None

    # OS distro detection state
    self._os_type = None
    self._os_variant = None
    self._os_autodetect = False

    # DEPRECATED: Public device lists unaltered by install process
    self.disks = []
    self.nics = []
    self.sound_devs = []
    self.hostdevs = []

    # General device list. Only access through API calls (even internally)
    self._devices = []

    # Device list to use/alter during install process. Don't access
    # directly, use internal APIs
    self._install_devices = []

    # The libvirt virDomain object we 'Create'
    self.domain = None
    self._consolechild = None

    # Default disk target prefix ('hd' or 'xvd'). Set in subclass
    self.disknode = None

    # Default bus for disks (set in subclass)
    self._diskbus = None

    self._caps = CapabilitiesParser.parse(self.conn.getCapabilities())

    # Add default devices (if applicable)
    self._default_input_device = None
    self._default_console_device = None
    inp = self._get_default_input_device()
    con = self._get_default_console_device()
    if inp:
        self.add_device(inp)
        self._default_input_device = inp
    if con:
        self.add_device(con)
        self._default_console_device = con