def testDiskNumbers(self):
    """Verify disk index <-> target-name conversion and target generation.

    Fix: use assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """
    # num_to_target: 1-based index to drive letter(s), base-26
    # "spreadsheet column" style (a..z, aa, ab, ...)
    self.assertEqual("a", VirtualDisk.num_to_target(1))
    self.assertEqual("b", VirtualDisk.num_to_target(2))
    self.assertEqual("z", VirtualDisk.num_to_target(26))
    self.assertEqual("aa", VirtualDisk.num_to_target(27))
    self.assertEqual("ab", VirtualDisk.num_to_target(28))
    self.assertEqual("az", VirtualDisk.num_to_target(52))
    self.assertEqual("ba", VirtualDisk.num_to_target(53))
    self.assertEqual("zz", VirtualDisk.num_to_target(27 * 26))
    self.assertEqual("aaa", VirtualDisk.num_to_target(27 * 26 + 1))

    # target_to_num: strip the bus prefix (hd/sd/vd/xvd) and return the
    # 0-based index of the letter suffix
    self.assertEqual(VirtualDisk.target_to_num("hda"), 0)
    self.assertEqual(VirtualDisk.target_to_num("hdb"), 1)
    self.assertEqual(VirtualDisk.target_to_num("sdz"), 25)
    self.assertEqual(VirtualDisk.target_to_num("sdaa"), 26)
    self.assertEqual(VirtualDisk.target_to_num("vdab"), 27)
    self.assertEqual(VirtualDisk.target_to_num("vdaz"), 51)
    self.assertEqual(VirtualDisk.target_to_num("xvdba"), 52)
    self.assertEqual(VirtualDisk.target_to_num("xvdzz"),
        26 * (25 + 1) + 25)
    self.assertEqual(VirtualDisk.target_to_num("xvdaaa"),
        26 * 26 * 1 + 26 * 1 + 0)

    disk = virtinst.VirtualDisk(utils.get_conn())
    disk.bus = "ide"

    # generate_target picks the first free letter for the bus prefix
    self.assertEqual("hda", disk.generate_target([]))
    self.assertEqual("hdb", disk.generate_target(["hda"]))
    self.assertEqual("hdc", disk.generate_target(["hdb", "sda"]))
    self.assertEqual("hdb", disk.generate_target(["hda", "hdd"]))

    disk.bus = "virtio-scsi"
    # With a controller number, targets are constrained to that
    # controller's slot range
    self.assertEqual("sdb",
        disk.generate_target(["sda", "sdg", "sdi"], 0))
    self.assertEqual("sdh", disk.generate_target(["sda", "sdg"], 1))
def parse_disk_entry(conn, disks, fullkey, value):
    """
    Parse a single .vmx key/value pair describing a disk, updating an
    existing entry in `disks` or appending a new one.

    FIXME: this should be a lot smarter.
    """
    # Bus-level keys like 'scsi0.present = "TRUE"' carry no per-disk
    # info; skip them.
    if re.match(r"^(scsi|ide)[0-9]+[^:]", fullkey):
        return

    ignore, bus, bus_nr, inst, key = re.split(
        r"^(scsi|ide)([0-9]+):([0-9]+)\.", fullkey)

    lowered = value.lower()
    if key == "present" and lowered == "false":
        return

    # Does anyone else think it's scary that we're still doing things
    # like this?
    if bus == "ide":
        inst = int(bus_nr) * 2 + (int(inst) % 2)
    elif bus == "scsi":
        inst = int(bus_nr) * 16 + (int(inst) % 16)

    # Reuse the disk for this bus/instance if one was already created,
    # otherwise build and register a fresh one.
    disk = next((d for d in disks
                 if d.bus == bus and getattr(d, "vmx_inst") == inst),
                None)
    if not disk:
        disk = virtinst.VirtualDisk(conn)
        disk.bus = bus
        setattr(disk, "vmx_inst", inst)
        disks.append(disk)

    if key == "devicetype":
        if lowered in ("atapi-cdrom", "cdrom-raw", "cdrom-image"):
            disk.device = "cdrom"

    if key == "filename":
        disk.path = value
        fmt = "raw"
        if lowered.endswith(".vmdk"):
            fmt = "vmdk"
            # See if the filename is actually a VMDK descriptor file
            newpath = parse_vmdk(disk.path)
            if newpath:
                logging.debug("VMDK file parsed path %s->%s",
                    disk.path, newpath)
                disk.path = newpath
        disk.driver_type = fmt
def validate_storage(self, vmname, path=None, device="disk", collidelist=None):
    """
    Build and validate a VirtualDisk for the given path/device, filling
    in a default path and volume-creation parameters from the UI when
    needed.  Returns the disk on success, False when the default pool
    can't be activated, or the err.val_err(...) result when no path was
    given for a disk/lun device.
    """
    # Default storage requires the default pool to be running before we
    # can generate paths or create volumes in it.
    if self.is_default_storage() and not self._check_default_pool_active():
        return False

    if path is None:
        path = (self.get_default_path(vmname, collidelist or [])
                if self.is_default_storage()
                else self.widget("storage-entry").get_text().strip())

    if not path and device in ["disk", "lun"]:
        return self.err.val_err(_("A storage path must be specified."))

    disk = virtinst.VirtualDisk(self.conn.get_backend())
    disk.path = path or None
    disk.device = device

    if disk.wants_storage_creation():
        # Path doesn't exist yet: set up a volume install on the
        # parent pool, non-sparse, sized from the UI spinbox.
        parent_pool = disk.get_parent_pool()
        req_size = uiutil.spin_get_helper(self.widget("storage-size"))
        vol_install = virtinst.VirtualDisk.build_vol_install(
            disk.conn, os.path.basename(disk.path), parent_pool,
            req_size, False)
        disk.set_vol_install(vol_install)

        # Apply the preferred volume format if this pool supports it
        fmt = self.conn.get_default_storage_format()
        if fmt in disk.get_vol_install().list_formats():
            logging.debug("Using default prefs format=%s for path=%s",
                fmt, disk.path)
            disk.get_vol_install().format = fmt
        else:
            logging.debug(
                "path=%s can not use default prefs format=%s, "
                "not setting it", disk.path, fmt)

    disk.validate()
    return disk
def _setup_disks(self, config):
    """
    Reset the guest's disk list and, when storage is enabled in config,
    attach a single disk: either a new volume in the local 'default'
    pool or an existing volume from a user-chosen pool.
    """
    self.__guest.disks = []

    if config.get_enable_storage():
        if config.get_use_local_storage():
            # Lazily create the 'default' pool, then pick a free
            # volume name inside it.
            if self.storage_pool_exists("default") is False:
                self.define_storage_pool("default")
            pool = self.__conn.storagePoolLookupByName("default")
            volname = virtinst.Storage.StorageVolume.find_free_name(
                config.get_guest_name(), pool_object=pool, suffix=".img")
            path = os.path.join(DEFAULT_POOL_TARGET_PATH, volname)
        else:
            # Use the volume the user selected from an existing pool
            volume = self.get_storage_volume(config.get_storage_pool(),
                config.get_storage_volume())
            path = volume.path()

        if path is not None:
            storage = virtinst.VirtualDisk(conn=self.__conn, path=path,
                size=config.get_storage_size())
            self.__guest.disks.append(storage)

    self.__guest.conn = self.__conn
def testSingleDisk(self):
    """Parse a disk from XML, change its target, check the XML output.

    Fix: use assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """
    xml = ("""<disk type="file" device="disk"><source file="/a.img"/>\n"""
           """<target dev="hda" bus="ide"/></disk>\n""")
    d = virtinst.VirtualDisk(conn, parsexml=xml)
    self._set_and_check(d, "target", "hda", "hdb")
    # Changing only the target should only change 'hda' -> 'hdb'
    self.assertEqual(xml.replace("hda", "hdb"), d.get_xml_config())
def _make_guest(installer=None, conn=None, os_variant=None):
    """
    Build a fully populated test Guest: VNC graphics, default
    input/console devices, audio, three disks (floppy, file-backed,
    block) and one virtual NIC.
    """
    if conn is None:
        conn = _default_conn

    guest = conn.caps.lookup_virtinst_guest()
    guest.type = "kvm"
    guest.name = "TestGuest"
    guest.memory = int(200 * 1024)
    guest.maxmemory = int(400 * 1024)
    guest.uuid = "12345678-1234-1234-1234-123456789012"

    graphics = virtinst.VirtualGraphics(conn)
    graphics.type = "vnc"
    graphics.keymap = "ja"
    guest.add_device(graphics)

    guest.features.pae = False
    guest.vcpus = 5

    guest.installer = installer or _make_installer(conn=conn)
    guest.emulator = "/usr/lib/xen/bin/qemu-dm"
    guest.os.arch = "i686"
    guest.os.os_type = "hvm"
    if os_variant:
        guest.os_variant = os_variant

    guest.add_default_input_device()
    guest.add_default_console_device()
    guest.add_device(virtinst.VirtualAudio(guest.conn))

    # Floppy disk
    floppy = VirtualDisk(conn)
    floppy.path = "/dev/default-pool/testvol1.img"
    floppy.device = floppy.DEVICE_FLOPPY
    floppy.validate()
    guest.add_device(floppy)

    # File disk, creating a tiny sparse volume if the path doesn't exist
    filedisk = virtinst.VirtualDisk(conn)
    filedisk.path = "/dev/default-pool/new-test-suite.img"
    if filedisk.wants_storage_creation():
        parent_pool = filedisk.get_parent_pool()
        vol_install = virtinst.VirtualDisk.build_vol_install(conn,
            os.path.basename(filedisk.path), parent_pool, .0000001, True)
        filedisk.set_vol_install(vol_install)
    filedisk.validate()
    guest.add_device(filedisk)

    # Block disk
    blockdisk = virtinst.VirtualDisk(conn)
    blockdisk.path = "/dev/disk-pool/diskvol1"
    blockdisk.validate()
    guest.add_device(blockdisk)

    # Network device
    nic = virtinst.VirtualNetworkInterface(conn)
    nic.macaddr = "22:22:33:44:55:66"
    nic.type = virtinst.VirtualNetworkInterface.TYPE_VIRTUAL
    nic.source = "default"
    guest.add_device(nic)

    return guest
def _import_file(doc, ctx, conn, input_file):
    """
    Parse an OVF document via a libxml2 xpath context and build a
    virtinst Guest from it.

    :param doc: parsed libxml2 document (unused here beyond being passed in)
    :param ctx: libxml2 xpath context over the document; assumes the
        ovf/rasd/xsi namespaces were registered by the caller -- not
        visible from this function
    :param conn: virtinst connection the Guest/devices are built against
    :param input_file: OVF file path, used as a fallback guest name
    :returns: a populated virtinst Guest (not yet installed/started)
    :raises ValueError: for unresolvable disk/file references
    :raises RuntimeError: when required OVF sections are missing
    :raises StandardError: when an unhandled section is marked required
    """
    ignore = doc

    def xpath_str(path):
        # Evaluate an xpath; return the first matched node's content
        # as a string, or the raw value for non-list results, else None.
        ret = ctx.xpathEval(path)
        result = None
        if ret is not None:
            if type(ret) == list:
                if len(ret) >= 1:
                    result = ret[0].content
            else:
                result = ret
        return result

    def bool_val(val):
        # Loose string -> bool: only the literal string "true" yields
        # True; "false", None, and anything else yield False.
        if str(val).lower() == "false":
            return False
        elif str(val).lower() == "true":
            return True

        return False

    def xpath_nodechildren(path):
        # Return the children of the first node found by the xpath
        nodes = ctx.xpathEval(path)
        if not nodes:
            return []
        return node_list(nodes[0])

    def _lookup_disk_path(path):
        # Resolve an OVF HostResource path ('/disk/<id>' or '/file/<id>',
        # optionally 'ovf:'-prefixed) to the referenced file href.
        # Returns (href, format); format is always reported as vmdk here.
        fmt = "vmdk"
        ref = None

        def _path_has_prefix(prefix):
            # Strip 'prefix' or 'ovf:prefix'; return False if neither.
            if path.startswith(prefix):
                return path[len(prefix):]
            if path.startswith("ovf:" + prefix):
                return path[len("ovf:" + prefix):]
            return False

        if _path_has_prefix("/disk/"):
            # Indirect reference: Disk entry -> fileRef -> References/File
            disk_ref = _path_has_prefix("/disk/")
            xpath = (_make_section_xpath(envbase, "DiskSection") +
                "/ovf:Disk[@ovf:diskId='%s']" % disk_ref)

            if not ctx.xpathEval(xpath):
                raise ValueError(_("Unknown disk reference id '%s' "
                    "for path %s.") % (path, disk_ref))

            ref = xpath_str(xpath + "/@ovf:fileRef")
        elif _path_has_prefix("/file/"):
            ref = _path_has_prefix("/file/")
        else:
            # NOTE(review): the '%' formatting happens inside _() here,
            # which defeats message translation -- presumably it was
            # meant to be outside the _() call; confirm before changing.
            raise ValueError(_("Unknown storage path type %s." % path))

        xpath = (envbase +
            "/ovf:References/ovf:File[@ovf:id='%s']" % ref)

        if not ctx.xpathEval(xpath):
            raise ValueError(_("Unknown reference id '%s' "
                "for path %s.") % (ref, path))

        return xpath_str(xpath + "/@ovf:href"), fmt

    # oVirt wraps sections in generic ovf:Content/ovf:Section nodes
    # tagged with xsi:type instead of using plain OVF element names.
    is_ovirt_format = False
    envbase = "/ovf:Envelope[1]"
    vsbase = envbase + "/ovf:VirtualSystem"
    if not ctx.xpathEval(vsbase):
        vsbase = envbase + "/ovf:Content[@xsi:type='ovf:VirtualSystem_Type']"
        is_ovirt_format = True

    def _make_section_xpath(base, section_name):
        # Build a section xpath, accounting for the oVirt variant above.
        if is_ovirt_format:
            return (base +
                "/ovf:Section[@xsi:type='ovf:%s_Type']" % section_name)
        return base + "/ovf:%s" % section_name

    osbase = _make_section_xpath(vsbase, "OperatingSystemSection")
    vhstub = _make_section_xpath(vsbase, "VirtualHardwareSection")

    if not ctx.xpathEval(vsbase):
        raise RuntimeError("Did not find any VirtualSystem section")
    if not ctx.xpathEval(vhstub):
        raise RuntimeError("Did not find any VirtualHardwareSection")
    # Template: one hardware Item per rasd:ResourceType value
    vhbase = vhstub + "/ovf:Item[rasd:ResourceType='%s']"

    # General info
    name = xpath_str(vsbase + "/ovf:Name")
    desc = xpath_str(vsbase + "/ovf:AnnotationSection/ovf:Annotation")
    if not desc:
        desc = xpath_str(vsbase + "/ovf:Description")

    vcpus = xpath_str((vhbase % DEVICE_CPU) + "/rasd:VirtualQuantity")
    sockets = xpath_str((vhbase % DEVICE_CPU) + "/rasd:num_of_sockets")
    cores = xpath_str((vhbase % DEVICE_CPU) + "/rasd:num_of_cores")
    mem = xpath_str((vhbase % DEVICE_MEMORY) + "/rasd:VirtualQuantity")
    alloc_mem = xpath_str((vhbase % DEVICE_MEMORY) +
        "/rasd:AllocationUnits")

    os_id = xpath_str(osbase + "/@id")
    os_version = xpath_str(osbase + "/@version")
    # This is the VMWare OS name
    os_vmware = xpath_str(osbase + "/@osType")

    logging.debug("OS parsed as: id=%s version=%s vmware=%s",
        os_id, os_version, os_vmware)

    # Sections that we handle
    # NetworkSection is ignored, since I don't have an example of
    # a valid section in the wild.
    parsed_sections = ["References", "DiskSection", "NetworkSection",
        "VirtualSystem"]

    # Check for unhandled 'required' sections
    for env_node in xpath_nodechildren(envbase):
        if env_node.name in parsed_sections:
            continue
        elif env_node.isText():
            continue

        logging.debug("Unhandled XML section '%s'", env_node.name)

        if not bool_val(env_node.prop("required")):
            continue
        raise StandardError(_("OVF section '%s' is listed as "
            "required, but parser doesn't know "
            "how to handle it.") % env_node.name)

    # Map controller InstanceID -> bus name so disks can resolve their
    # rasd:Parent reference below.
    disk_buses = {}
    for node in ctx.xpathEval(vhbase % DEVICE_IDE_BUS):
        instance_id = _get_child_content(node, "InstanceID")
        disk_buses[instance_id] = "ide"
    for node in ctx.xpathEval(vhbase % DEVICE_SCSI_BUS):
        instance_id = _get_child_content(node, "InstanceID")
        disk_buses[instance_id] = "scsi"

    ifaces = []
    for node in ctx.xpathEval(vhbase % DEVICE_ETHERNET):
        iface = virtinst.VirtualNetworkInterface(conn)
        # XXX: Just ignore 'source' info and choose the default
        net_model = _get_child_content(node, "ResourceSubType")
        # A purely numeric ResourceSubType is skipped as a model name
        if net_model and not net_model.isdigit():
            iface.model = net_model.lower()
        iface.set_default_source()
        ifaces.append(iface)

    disks = []
    for node in ctx.xpathEval(vhbase % DEVICE_DISK):
        bus_id = _get_child_content(node, "Parent")
        path = _get_child_content(node, "HostResource")

        # Unknown/missing controllers fall back to ide
        bus = disk_buses.get(bus_id, "ide")
        fmt = "raw"

        if path:
            path, fmt = _lookup_disk_path(path)

        disk = virtinst.VirtualDisk(conn)
        disk.path = path
        disk.driver_type = fmt
        disk.bus = bus
        disk.device = "disk"
        disks.append(disk)

    # XXX: Convert these OS values to something useful
    ignore = os_version
    ignore = os_id
    ignore = os_vmware

    (capsguest, capsdomain) = conn.caps.guest_lookup()
    guest = conn.caps.build_virtinst_guest(conn, capsguest, capsdomain)
    guest.installer = virtinst.ImportInstaller(conn)

    if not name:
        name = os.path.basename(input_file)

    guest.name = name.replace(" ", "_")
    guest.description = desc or None
    if vcpus:
        guest.vcpus = int(vcpus)
    elif sockets or cores:
        # No flat vcpu count in the OVF: derive it from the topology
        if sockets:
            guest.cpu.sockets = int(sockets)
        if cores:
            guest.cpu.cores = int(cores)
        guest.cpu.vcpus_from_topology()

    if mem:
        # Scale OVF memory (via its AllocationUnits) to the units
        # guest.memory expects -- hence the * 1024
        guest.memory = _convert_alloc_val(alloc_mem, mem) * 1024

    for dev in ifaces + disks:
        guest.add_device(dev)

    return guest
def start_install(name=None, ram=None, disks=None, mac=None, uuid=None,
                  extra=None, vcpus=None, profile_data=None, arch=None,
                  no_gfx=False, fullvirt=True, bridge=None, virt_type=None,
                  virt_auto_boot=False, qemu_driver_type=None,
                  qemu_net_type=None):
    """
    Create and start a qemu/kvm/kqemu guest from cobbler profile data.

    :param name: guest name
    :param ram: memory amount (units as expected by guest.set_memory)
    :param disks: list of (path, size, driver_type) tuples
    :param mac: not read here -- per-interface MACs come from profile_data
        or random_mac(); the name is reused as a loop variable below
    :param uuid: optional guest UUID
    :param extra: extra installer/kernel args string
    :param vcpus: vcpu count
    :param profile_data: dict of cobbler profile/system info (install
        source, kickstart, breed, interfaces, bridges, ...)
    :param arch: guest arch; reset to None (host default) when KVM is used
    :param no_gfx: not read in this function (VNC is always set)
    :param fullvirt: not read in this function
    :param bridge: bridge override; may be a comma-separated list,
        matched to interfaces by position
    :param virt_type: not read in this function (type is probed)
    :param virt_auto_boot: guest autostart flag
    :param qemu_driver_type: disk bus for the created disks
    :param qemu_net_type: NIC model for the created interfaces
    :returns: status message string
    :raises koan.InfoException: on missing install source, undefined
        virt-bridge, or a zero-size non-/dev disk
    """
    # Pick the best hypervisor the host supports: kvm > kqemu > qemu
    vtype = "qemu"
    if virtinst.util.is_kvm_capable():
        vtype = "kvm"
        arch = None  # let virtinst.FullVirtGuest() default to the host arch
    elif virtinst.util.is_kqemu_capable():
        vtype = "kqemu"
    print "- using qemu hypervisor, type=%s" % vtype

    # Normalize 32-bit x86 arch spellings to what virtinst expects
    if arch is not None and arch.lower() in ["x86", "i386"]:
        arch = "i686"

    guest = virtinst.FullVirtGuest(hypervisorURI="qemu:///system",
                                   type=vtype, arch=arch)

    if not profile_data.has_key("file"):
        # images don't need to source this
        if not profile_data.has_key("install_tree"):
            raise koan.InfoException(
                "Cannot find install source in kickstart file, aborting.")

        if not profile_data["install_tree"].endswith("/"):
            profile_data["install_tree"] = profile_data["install_tree"] + "/"

        # virt manager doesn't like nfs:// and just wants nfs:
        # (which cobbler should fix anyway)
        profile_data["install_tree"] = profile_data["install_tree"].replace(
            "nfs://", "nfs:")

    if profile_data.has_key("file"):
        # this is an image based installation
        input_path = profile_data["file"]
        print "- using image location %s" % input_path
        if input_path.find(":") == -1:
            # this is not an NFS path
            guest.cdrom = input_path
        else:
            # NFS-hosted image: mount it and point the cdrom at the mount
            (tempdir, filename) = utils.nfsmount(input_path)
            guest.cdrom = os.path.join(tempdir, filename)

        kickstart = profile_data.get("kickstart", "")
        if kickstart != "":
            # we have a (windows?) answer file we have to provide
            # to the ISO.
            print "I want to make a floppy for %s" % kickstart
            floppy_path = utils.make_floppy(kickstart)
            guest.disks.append(
                virtinst.VirtualDisk(device=virtinst.VirtualDisk.DEVICE_FLOPPY,
                                     path=floppy_path))
    else:
        guest.location = profile_data["install_tree"]

    # NOTE(review): this replace is a no-op as written; presumably the
    # intent was to escape '&' (e.g. to '&amp;') -- confirm against
    # upstream before changing.
    extra = extra.replace("&", "&")
    guest.extraargs = extra

    if profile_data.has_key("breed"):
        breed = profile_data["breed"]
        if breed != "other" and breed != "":
            if breed in ["ubuntu", "debian", "redhat"]:
                guest.set_os_type("linux")
            elif breed == "suse":
                guest.set_os_type("linux")
                # SUSE requires the correct arch to find
                # kernel+initrd on the inst-source /boot/<arch>/loader/...
                guest.arch = profile_data["arch"]
                if guest.arch in ["i386", "i486", "i586"]:
                    guest.arch = "i686"
            elif breed in ["windows"]:
                guest.set_os_type("windows")
            else:
                guest.set_os_type("unix")

            if profile_data.has_key("os_version"):
                # FIXME: when os_version is not defined and it's linux,
                # do we use generic24/generic26 ?
                if breed == "ubuntu":
                    # If breed is Ubuntu, need to set the version to the
                    # type of "ubuntu<version>" as defined by virtinst.
                    # (i.e. ubuntunatty)
                    version = "ubuntu%s" % profile_data["os_version"]
                else:
                    version = profile_data["os_version"]
                if version != "other" and version != "":
                    try:
                        guest.set_os_variant(version)
                    except:
                        # Unknown variant: fall back to generic behavior
                        print "- virtinst library does not understand variant %s, treating as generic" % version
                        pass

    guest.set_name(name)
    guest.set_memory(ram)
    guest.set_vcpus(vcpus)
    guest.set_autostart(virt_auto_boot)

    # for KVM, we actually can't disable this, since it's the only
    # console it has other than SDL
    guest.set_graphics("vnc")

    if uuid is not None:
        guest.set_uuid(uuid)

    for d in disks:
        print "- adding disk: %s of size %s (driver type=%s)" % (
            d[0], d[1], d[2])
        # A /dev path is usable as-is; anything else needs a nonzero size
        if d[1] != 0 or d[0].startswith("/dev"):
            vdisk = virtinst.VirtualDisk(d[0], size=d[1],
                                         bus=qemu_driver_type)
            try:
                vdisk.set_driver_type(d[2])
            except:
                # Best effort: let virtinst pick its default driver type
                print "- virtinst failed to create the VirtualDisk with the specified driver type (%s), using whatever it defaults to instead" % d[2]
            guest.disks.append(vdisk)
        else:
            raise koan.InfoException(
                "this virtualization type does not work without a disk image, set virt-size in Cobbler to non-zero")

    if profile_data.has_key("interfaces"):
        counter = 0
        interfaces = profile_data["interfaces"].keys()
        interfaces.sort()
        # Matches vlan sub-interface names like 'eth0.10'
        vlanpattern = re.compile("[a-zA-Z0-9]+\.[0-9]+")
        for iname in interfaces:
            intf = profile_data["interfaces"][iname]

            # Skip master/bond/bridge devices, vlan sub-interfaces and
            # ':'-style aliases -- only real NICs get a guest NIC
            if intf["interface_type"] in ("master", "bond", "bridge") or \
               vlanpattern.match(iname) or iname.find(":") != -1:
                continue

            mac = intf["mac_address"]
            if mac == "":
                mac = random_mac()

            if bridge is None:
                # No CLI override: per-interface bridge, falling back
                # to the profile-wide bridge
                profile_bridge = profile_data["virt_bridge"]

                intf_bridge = intf["virt_bridge"]
                if intf_bridge == "":
                    if profile_bridge == "":
                        raise koan.InfoException(
                            "virt-bridge setting is not defined in cobbler")
                    intf_bridge = profile_bridge
            else:
                # CLI override: one bridge for all NICs, or one per NIC
                # when comma-separated (matched by position)
                if bridge.find(",") == -1:
                    intf_bridge = bridge
                else:
                    bridges = bridge.split(",")
                    intf_bridge = bridges[counter]

            nic_obj = virtinst.VirtualNetworkInterface(macaddr=mac,
                bridge=intf_bridge, model=qemu_net_type)
            guest.nics.append(nic_obj)
            counter = counter + 1
    else:
        # No interface data in the profile: one NIC with a random MAC
        if bridge is not None:
            profile_bridge = bridge
        else:
            profile_bridge = profile_data["virt_bridge"]

        if profile_bridge == "":
            raise koan.InfoException(
                "virt-bridge setting is not defined in cobbler")

        nic_obj = virtinst.VirtualNetworkInterface(macaddr=random_mac(),
            bridge=profile_bridge, model=qemu_net_type)
        guest.nics.append(nic_obj)

    guest.start_install()

    return "use virt-manager and connect to qemu to manage guest: %s" % name
def validate_storage(self, vmname, path=None, size=None, sparse=None,
                     device="disk", fmt=None, collidelist=None):
    """
    Validate the storage UI selections and build a matching VirtualDisk
    (with a volume install attached when the path needs creating).

    :param vmname: guest name, used when generating a default path
    :param path: explicit storage path; defaults from the UI or pool
    :param size: volume size; defaults from the storage-size spinbox
    :param sparse: sparse allocation flag; defaults from the UI toggle
    :param device: disk device type ("disk", "lun", cdrom constant, ...)
    :param fmt: explicit volume format; otherwise the connection's
        default preference is applied when supported
    :param collidelist: paths to avoid when generating a default path

    Returns True when storage is disabled, False when the default pool
    can't be activated, or err.val_err(...) on a validation problem.
    """
    collidelist = collidelist or []
    use_storage = self.widget("config-storage-box").is_sensitive()
    is_default = self.is_default_storage()
    conn = self.conn.get_backend()

    # Validate storage
    if not use_storage:
        return True

    # Make sure default pool is running
    if is_default:
        ret = self._check_default_pool_active()
        if not ret:
            return False

    # CDROM media is attached read-only
    readonly = False
    if device == virtinst.VirtualDisk.DEVICE_CDROM:
        readonly = True

    try:
        if size is None and sparse is None:
            size = uiutil.spin_get_helper(
                self.widget("config-storage-size"))
            sparse = (
                not self.widget("config-storage-nosparse").get_active())
        if path is None:
            if is_default:
                path = self.get_default_path(vmname, collidelist)
            else:
                path = self.widget(
                    "config-storage-entry").get_text().strip()

        if is_default:
            path = self._check_ideal_path(path, vmname, collidelist)

        if not path and device in ["disk", "lun"]:
            return self.err.val_err(_("A storage path must be specified."))

        disk = virtinst.VirtualDisk(conn)
        disk.path = path or None
        disk.read_only = readonly
        disk.device = device

        if disk.wants_storage_creation():
            # Path needs creating: set up a volume install on its pool
            pool = disk.get_parent_pool()
            vol_install = virtinst.VirtualDisk.build_vol_install(
                disk.conn, os.path.basename(disk.path), pool,
                size, sparse, fmt=fmt or None)
            disk.set_vol_install(vol_install)

        if not fmt:
            # No explicit format: apply the preferred default when the
            # volume install supports it
            fmt = self.conn.get_default_storage_format()
            if (self.is_default_storage() and
                disk.get_vol_install() and
                fmt in disk.get_vol_install().list_formats()):
                logging.debug("Setting disk format from prefs: %s", fmt)
                disk.get_vol_install().format = fmt

        disk.validate()
        # NOTE(review): the success path falls through and returns None;
        # the similar newer validate_storage variant returns the disk
        # object here -- confirm whether callers expect 'return disk'.
    except Exception, e:
        return self.err.val_err(_("Storage parameter error."), e)
def _import_file(conn, input_file):
    """
    Parse the OVF file and generate a virtinst.Guest object from it

    :param conn: virtinst connection the Guest/devices are built against
    :param input_file: path to the OVF XML file; also the fallback name
    :returns: a populated virtinst Guest
    :raises Exception: when an unhandled OVF section is marked required

    Fix: the unhandled-required-section error used ``env_node.name``,
    but xml.etree.ElementTree Elements have ``.tag`` (as the two uses
    just above it show) -- the old code raised AttributeError instead
    of the intended message.
    """
    root = xml.etree.ElementTree.parse(input_file).getroot()
    vsnode = _find(root, "./ovf:VirtualSystem")
    vhnode = _find(vsnode, "./ovf:VirtualHardwareSection")

    # General info
    name = _text(vsnode.find("./ovf:Name", OVF_NAMESPACES))
    desc = _text(vsnode.find("./ovf:AnnotationSection/ovf:Annotation",
        OVF_NAMESPACES))
    if not desc:
        desc = _text(vsnode.find("./ovf:Description", OVF_NAMESPACES))

    # One hardware Item per rasd:ResourceType value
    vhxpath = "./ovf:Item[rasd:ResourceType='%s']"
    vcpus = _text(
        _find(vhnode, (vhxpath % DEVICE_CPU) + "/rasd:VirtualQuantity"))
    mem = _text(
        _find(vhnode, (vhxpath % DEVICE_MEMORY) + "/rasd:VirtualQuantity"))
    alloc_mem = _text(
        _find(vhnode, (vhxpath % DEVICE_MEMORY) + "/rasd:AllocationUnits"))

    # Sections that we handle
    # NetworkSection is ignored, since I don't have an example of
    # a valid section in the wild.
    parsed_sections = ["References", "DiskSection", "NetworkSection",
        "VirtualSystem"]

    # Check for unhandled 'required' sections
    for env_node in root.findall("./"):
        if any([p for p in parsed_sections if p in env_node.tag]):
            continue

        logging.debug("Unhandled XML section '%s'", env_node.tag)
        if not _convert_bool_val(env_node.attrib.get("required")):
            continue
        # Element has .tag, not .name (bug fix -- see docstring)
        raise Exception(
            _("OVF section '%s' is listed as "
              "required, but parser doesn't know "
              "how to handle it.") % env_node.tag)

    # Map controller InstanceID -> bus so disks can resolve their
    # rasd:Parent reference below
    disk_buses = {}
    for node in _findall(vhnode, vhxpath % DEVICE_IDE_BUS):
        instance_id = _text(_find(node, "rasd:InstanceID"))
        disk_buses[instance_id] = "ide"
    for node in _findall(vhnode, vhxpath % DEVICE_SCSI_BUS):
        instance_id = _text(_find(node, "rasd:InstanceID"))
        disk_buses[instance_id] = "scsi"

    ifaces = []
    for node in _findall(vhnode, vhxpath % DEVICE_ETHERNET):
        iface = virtinst.VirtualNetworkInterface(conn)
        # Just ignore 'source' info for now and choose the default
        net_model = _text(_find(node, "rasd:ResourceSubType"))
        # A purely numeric ResourceSubType is not used as a model name
        if net_model and not net_model.isdigit():
            iface.model = net_model.lower()
        iface.set_default_source()
        ifaces.append(iface)

    disks = []
    for node in _findall(vhnode, vhxpath % DEVICE_DISK):
        bus_id = _text(_find(node, "rasd:Parent"))
        path = _text(_find(node, "rasd:HostResource"))

        # Unknown/missing controller references fall back to ide
        bus = disk_buses.get(bus_id, "ide")
        fmt = "raw"
        if path:
            path = _lookup_disk_path(root, path)
            fmt = "vmdk"

        disk = virtinst.VirtualDisk(conn)
        disk.path = path
        disk.driver_type = fmt
        disk.bus = bus
        disk.device = "disk"
        disks.append(disk)

    # Generate the Guest
    guest = conn.caps.lookup_virtinst_guest()
    guest.installer = virtinst.ImportInstaller(conn)

    if not name:
        name = os.path.basename(input_file)

    guest.name = name.replace(" ", "_")
    guest.description = desc or None
    if vcpus:
        guest.vcpus = int(vcpus)
    if mem:
        # Scale OVF memory (via its AllocationUnits) to the units
        # guest.memory expects -- hence the * 1024
        guest.memory = _convert_alloc_val(alloc_mem, mem) * 1024

    for dev in ifaces + disks:
        guest.add_device(dev)

    return guest