def get_test_config(self):
    """Load and parse the XML test configuration referenced by self.config_path.

    Returns:
        The xmlobject tree parsed from the configuration file.
    """
    cfg_path = os.path.abspath(self.config_path)
    # The with-statement closes the file; the original also called
    # fd.close() inside the block, which was redundant.
    with open(cfg_path, 'r') as fd:
        xmlstr = fd.read()
    return xmlobject.loads(xmlstr)
def get_scenario_file_vm(vm_name, scenario_file):
    """Find a <vm> entry by name in a deployed scenario file.

    Args:
        vm_name: the vm name to match against each entry's name_ attribute.
        scenario_file: path to the scenario XML file.

    Returns:
        The matching vm xmlobject, or None (implicitly) when no entry matches.
    """
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    scenariofile = xmlobject.loads(xmlstr)
    for s_vm in xmlobject.safe_list(scenariofile.vms.vm):
        if s_vm.name_ == vm_name:
            return s_vm
def destroy_scenario(scenario_config, scenario_file):
    """Destroy every VM listed in the scenario file via the management node.

    Args:
        scenario_config: parsed deployer config providing the management IP.
        scenario_file: path to the scenario XML file listing deployed VMs.
    """
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    # Parse into a distinct name instead of shadowing the path parameter.
    scenario_xml = xmlobject.loads(xmlstr)
    zstack_management_ip = scenario_config.basicConfig.zstackManagementIp.text_
    for vm in xmlobject.safe_list(scenario_xml.vms.vm):
        destroy_vm(zstack_management_ip, vm.uuid_)
def listVirtualMachines(url, sasluser, saslpass, keystr):
    # Enumerate all active domains on a remote libvirt host and report, for
    # each, its basic facts plus whether it can be converted (V2V).
    # Returns (qemuVersion, libvirtVersion, [VmInfo], {uuid: v2v-capable}).

    def getMac(ifxml):
        # Empty string when the interface has no <mac> element.
        return ifxml.mac.address_ if ifxml.mac else ""

    def getMacs(iface):
        # xmlobject yields a bare element for a single <interface>, a list
        # for several; normalize both to a list of MAC strings.
        if not iface:
            return []
        if isinstance(iface, list):
            return [ getMac(inf) for inf in iface ]
        return [ getMac(iface) ]

    def getV2vCap(qemuver, libvirtver, vminfo):
        # Minimum version gates: qemu >= 1.1, and libvirt >= 1.2.16 when any
        # volume uses the rbd protocol, otherwise libvirt >= 1.2.9.
        if qemuver < getVerNumber(1, 1):
            return False
        if any(map(lambda v: v.protocol == 'rbd', vminfo.volumes)):
            return libvirtver >= getVerNumber(1, 2, 16)
        return libvirtver >= getVerNumber(1, 2, 9)

    vms = []
    v2vCaps = {}
    qemuVersion, libvirtVersion = None, None
    with LibvirtConn(url, sasluser, saslpass, keystr) as c:
        qemuVersion = c.getVersion()
        libvirtVersion = c.getLibVersion()
        # Only running domains are candidates for conversion.
        for dom in filter(lambda d: d.isActive(), c.listAllDomains()):
            info = VmInfo()
            info.name = dom.name()
            info.uuid = dom.UUIDString()
            dinfo = dom.info()
            # dom.info() memory is in KiB; convert to bytes.
            info.memorySize = dinfo[1] * 1024
            info.cpuNum = dinfo[3]
            try:
                info.description = dom.metadata(libvirt.VIR_DOMAIN_METADATA_DESCRIPTION, None)
            except libvirt.libvirtError as ex:
                # Missing description metadata is not an error; leave unset.
                pass
            xmldesc = dom.XMLDesc(0)
            logger.info("domain xml for vm: {}\n{}".format(info.name, xmldesc))
            dxml = xmlobject.loads(xmldesc)
            if dxml.devices.hasattr('interface'):
                info.macAddresses = getMacs(dxml.devices.interface)
            else:
                info.macAddresses = []
            info.volumes = getVolumes(dom, dxml)
            cap = getV2vCap(qemuVersion, libvirtVersion, info)
            v2vCaps[info.uuid] = cap
            info.cdromNum = getCdromNum(dom, dxml)
            vms.append(info)
    return qemuVersion, libvirtVersion, vms, v2vCaps
def from_virt_domain(domain):
    """Wrap an existing libvirt domain in a Vm object.

    Captures the domain handle, its power state, and its XML description
    (both raw and parsed).
    """
    instance = Vm()
    instance.domain = domain
    state = domain.info()[0]
    instance.state = Vm.power_state[state]
    xml_desc = domain.XMLDesc(0)
    instance.domain_xml = xml_desc
    instance.domain_xmlobject = xmlobject.loads(xml_desc)
    # The domain <name> element carries the zstack VM uuid.
    instance.uuid = instance.domain_xmlobject.name.text_
    return instance
def testName(self): cfg = os.path.abspath('zstacklib/test/TestCreateVm.xml') with open(cfg, 'r') as fd: content = fd.read() xo = xmlobject.loads(content) xmlstr = xo.dump() xmldom = dom.parseString(xmlstr) xmlstr = xmldom.toprettyxml() text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', xmlstr) print prettyXml
def get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, index):
    """Return the index-th <vm> element from the deployed scenario file.

    NOTE(review): on the early-return path this returns ``mn_host_list``,
    which is not defined in this function — presumably a module-level
    global or a copy/paste slip; confirm against the enclosing module.
    """
    test_util.test_logger("@@DEBUG@@:<scenarioConfig:%s><scenarioFile:%s><scenarioFile is existed: %s>" \
            %(str(scenarioConfig), str(scenarioFile), str(os.path.exists(scenarioFile))))
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return mn_host_list
    test_util.test_logger("@@DEBUG@@: after config file exist check")
    # The with-statement closes the file; the explicit fd.close() was redundant.
    with open(scenarioFile, 'r') as fd:
        xmlstr = fd.read()
    scenario_file = xmlobject.loads(xmlstr)
    return xmlobject.safe_list(scenario_file.vms.vm)[index]
def get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, index):
    """Return the index-th <vm> element from the deployed scenario file.

    NOTE(review): on the early-return path this returns ``mha_s_vm_list``,
    which is not defined in this function — presumably a module-level
    global or a copy/paste slip; confirm against the enclosing module.
    """
    test_util.test_logger("@@DEBUG@@:<scenarioConfig:%s><scenarioFile:%s><scenarioFile is existed: %s>" \
            %(str(scenarioConfig), str(scenarioFile), str(os.path.exists(scenarioFile))))
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return mha_s_vm_list
    test_util.test_logger("@@DEBUG@@: after config file exist check")
    # The with-statement closes the file; the explicit fd.close() was redundant.
    with open(scenarioFile, 'r') as fd:
        xmlstr = fd.read()
    scenario_file = xmlobject.loads(xmlstr)
    return xmlobject.safe_list(scenario_file.vms.vm)[index]
def build_deploy_xmlobject_from_configure(xml_cfg_path, template_file_path=None):
    """Parse a deploy XML config, optionally expanding $KEY placeholders.

    When a template file is given, its key/value pairs are substituted into
    the config text before parsing; a missing key fails the test run.
    """
    with open(xml_cfg_path, 'r') as cfg_fd:
        content = cfg_fd.read()
    if template_file_path:
        values = _template_to_dict(template_file_path)
        try:
            content = string.Template(content).substitute(values)
        except KeyError as key:
            test_fail("Did not find value definition in [template:] '%s' for [KEY:] '%s' from [config:] '%s' " % (template_file_path, key, xml_cfg_path))
    return xmlobject.loads(content)
def from_iptables_xml():
    """Snapshot the live iptables rules into an IPTables object.

    Pipes iptables-save through iptables-xml, parses the result, and builds
    one Table per <table> element. Returns None when no tables are present.
    """
    xml_output = shell.call('/sbin/iptables-save | /bin/iptables-xml')
    parsed = xmlobject.loads(xml_output)
    result = IPTables()
    if not xmlobject.has_element(parsed, 'table'):
        return None
    for table_xml in parsed.table:
        table = Table()
        table.table_xml_object = table_xml
        table._parse_chains()
        result.tables[table.name] = table
    return result
def destroy_scenario(scenario_config, scenario_file):
    """Tear down every VM recorded in the scenario file, cleaning EIP/VIP first.

    NOTE(review): ``eip_lst`` and ``vip_lst`` are not defined in this
    function — presumably module-level lists populated elsewhere; confirm,
    since the same lists are deleted for every l3network of every VM.
    """
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    # Parse into a distinct name instead of shadowing the path parameter.
    scenario_xml = xmlobject.loads(xmlstr)
    zstack_management_ip = scenario_config.basicConfig.zstackManagementIp.text_
    for vm in xmlobject.safe_list(scenario_xml.vms.vm):
        # delete eip
        if xmlobject.has_element(vm, 'eipRef'):
            for l3network in xmlobject.safe_list(vm.l3Networks.l3Network):
                for eip in eip_lst:
                    eip.delete()
                for vip in vip_lst:
                    vip.delete()
        destroy_vm(zstack_management_ip, vm.uuid_)
def get_sce_hosts(scenarioConfig=test_lib.all_scenario_config, scenarioFile=test_lib.scenario_file):
    """Collect deployed scenario <vm> entries matching the configured host VMs.

    Returns:
        List of scenario-file vm xmlobjects whose names match configured VMs;
        empty list when the scenario config or file is unavailable.
    """
    host_list = []
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return host_list
    # Parse the scenario file once up front; the original re-read and
    # re-parsed it inside the nested loops for every configured VM.
    with open(scenarioFile, 'r') as fd:
        scenario_xml = xmlobject.loads(fd.read())
    s_vms = xmlobject.safe_list(scenario_xml.vms.vm)
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            for s_vm in s_vms:
                if s_vm.name_ == vm.name_:
                    host_list.append(s_vm)
    return host_list
def abort_block_job(conn, filters):
    # Abort any in-flight libvirt block jobs on the source VM's volumes,
    # skipping volumes excluded by the caller-provided filters.
    # NOTE(review): this reads ``cmd.srcVmUuid`` although ``cmd`` is not a
    # parameter — presumably an enclosing/module-scope command object;
    # confirm it is in scope wherever this is called.
    with LibvirtConn(conn.libvirtURI, conn.saslUser, conn.saslPass, conn.sshPrivKey) as c:
        dom = c.lookupByUUIDString(cmd.srcVmUuid)
        if not dom:
            logger.info('VM not found: {}'.format(cmd.srcVmUuid))
            return
        xmlDesc = dom.XMLDesc(0)
        dxml = xmlobject.loads(xmlDesc)
        # Drop volumes the filter dict marks as skipped.
        volumes = filter(lambda v: not skipVolume(filters, v.name), getVolumes(dom, dxml))
        for v in volumes:
            info = dom.blockJobInfo(v.name, 0)
            # Only abort when a job is actually active on this disk.
            if info:
                dom.blockJobAbort(v.name)
def get_buildid_by_sce_file(scenarioFile):
    """Extract the build id from the first VM name in a scenario file.

    VM names are underscore-separated; the build id is the 7th field
    (index 6). The original docstring described an unrelated function
    ("host with vip bound") — corrected here.

    Returns:
        The build id string from the first <vm> entry.
    """
    with open(scenarioFile, 'r') as fd:
        xmlstr = fd.read()
    scenario_xml = xmlobject.loads(xmlstr)
    for s_vm in xmlobject.safe_list(scenario_xml.vms.vm):
        raw_name = s_vm.name_
        test_util.test_logger("raw name from s_vm is %s" %(raw_name))
        sub_name_lst = raw_name.split('_')
        buildid = sub_name_lst[6]
        test_util.test_logger("get buildid is %s" %(buildid))
        # Deliberately returns on the first entry: all VMs of one scenario
        # share the same build id.
        return buildid
def get_mn_host(scenarioConfig, scenarioFile):
    """Return scenario-file <vm> entries for hosts marked with <mnHostRef>.

    Returns:
        List of matching vm xmlobjects; empty when config/file is missing.
    """
    mn_host_list = []
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(
            scenarioFile):
        return mn_host_list
    # Parse the scenario file once up front; the original re-read it for
    # every VM carrying an mnHostRef.
    with open(scenarioFile, 'r') as fd:
        scenario_xml = xmlobject.loads(fd.read())
    s_vms = xmlobject.safe_list(scenario_xml.vms.vm)
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            if not xmlobject.has_element(vm, 'mnHostRef'):
                continue
            for s_vm in s_vms:
                if s_vm.name_ == vm.name_:
                    mn_host_list.append(s_vm)
    return mn_host_list
def setup_static_ip(scenario_file):
    """Assign static 10.x IPs to the zsn1 NIC of every VM in a scenario file.

    Uses sshpass with a fixed password to reach each VM's management IP and
    runs zs-network-setting there, preferring the br_zsn1 bridge when the
    target VM has one.
    """
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    # Parse into a distinct name instead of shadowing the path parameter.
    scenario_xml = xmlobject.loads(xmlstr)
    for vm in xmlobject.safe_list(scenario_xml.vms.vm):
        mnip = vm.managementIp_
        if not xmlobject.has_element(vm, 'ips'):
            continue
        for ip in xmlobject.safe_list(vm.ips.ip):
            nic_ip = ip.ip_
            # Only 10.x addresses are configured as static NIC IPs.
            if not nic_ip.startswith("10"):
                continue
            # Prefer the bridge device when it exists on the target VM.
            if shell.run("%s %s 'ip a|grep br_zsn1'"%(ssh_cmd, mnip))== 0:
                nic = "br_zsn1"
            else:
                nic = "zsn1"
            netmask = "255.255.255.0"
            shell.call("%s %s zs-network-setting -i %s %s %s|exit 0" %(ssh_cmd, mnip, nic, nic_ip, netmask) )
    return
def getCdromNum(dom, dxml=None):
    """Count the cdrom devices attached to a libvirt domain.

    Args:
        dom: libvirt domain; only used to fetch the XML when dxml is None.
        dxml: optional pre-parsed domain xmlobject (avoids re-parsing).

    Returns:
        Number of <disk device='cdrom'> entries; 0 when there are no disks.
    """
    if not dxml:
        dxml = xmlobject.loads(dom.XMLDesc(0))

    def countCdrom(domain_xml):
        disk = domain_xml.devices.disk
        if not disk:
            return 0
        # xmlobject yields a bare element (not a list) for a single <disk>;
        # the original returned 0 in that case and missed a lone cdrom.
        if not isinstance(disk, list):
            disk = [disk]
        counter = 0
        for disk_xml in disk:
            if disk_xml.device_ == 'cdrom':
                counter += 1
        return counter

    return countCdrom(dxml)
def get_mn_host(scenarioConfig, scenarioFile):
    """Return scenario-file <vm> entries for hosts marked with <mnHostRef>.

    Debug-logging variant; returns an empty list when the scenario config
    or file is unavailable.
    """
    mn_host_list = []
    test_util.test_logger("@@DEBUG@@:<scenarioConfig:%s><scenarioFile:%s><scenarioFile is existed: %s>" \
            %(str(scenarioConfig), str(scenarioFile), str(os.path.exists(scenarioFile))))
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return mn_host_list
    test_util.test_logger("@@DEBUG@@: after config file exist check")
    # Parse the scenario file once up front; the original re-read it for
    # every VM carrying an mnHostRef.
    with open(scenarioFile, 'r') as fd:
        scenario_xml = xmlobject.loads(fd.read())
    s_vms = xmlobject.safe_list(scenario_xml.vms.vm)
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            if not xmlobject.has_element(vm, 'mnHostRef'):
                continue
            for s_vm in s_vms:
                if s_vm.name_ == vm.name_:
                    mn_host_list.append(s_vm)
    test_util.test_logger("@@DEBUG@@: %s" %(str(mn_host_list)))
    return mn_host_list
def getVolumes(dom, dxml=None):
    # Build VolumeInfo entries for every disk of a libvirt domain and
    # classify exactly one of them as the ROOT volume.
    def getVolume(dom, diskxml):
        v = VolumeInfo()
        # cdrom/floppy devices are not volumes; dropped by the caller.
        if diskxml.device_ in [ 'cdrom', 'floppy' ]:
            return None
        # The disk advertising boot order 1 is the root volume.
        if diskxml.hasattr('boot') and diskxml.boot and diskxml.boot.hasattr('order_') and diskxml.boot.order_ == '1':
            v.type = 'ROOT'
        else:
            v.type = 'DATA'
        v.name = diskxml.target.dev_
        v.bus = diskxml.target.bus_
        # Network disks (e.g. rbd) carry a protocol attribute on <source>.
        if hasattr(diskxml.source, 'protocol_'):
            v.protocol = diskxml.source.protocol_
        v.size, _, v.physicalSize = dom.blockInfo(v.name)
        return v
    def listVolumes(dom, disk):
        # xmlobject yields a bare element for a single <disk>, a list otherwise.
        if not disk:
            return []
        if isinstance(disk, list):
            return [ getVolume(dom, d) for d in disk ]
        return [ getVolume(dom, disk) ]
    if not dxml:
        dxml = xmlobject.loads(dom.XMLDesc(0))
    # Drop the None entries produced for cdrom/floppy devices.
    # (Python 2: filter returns a list here, so len() below is valid.)
    volumes = filter(lambda v:v, listVolumes(dom, dxml.devices.disk))
    if len(volumes) == 0:
        raise Exception("no disks found for VM: "+dom.name())
    # If no disk advertised boot order 1, assume the first disk is the root.
    if len(filter(lambda v: v.type == 'ROOT', volumes)) == 0:
        volumes[0].type = 'ROOT'
    return volumes
def export_zstack_deployment_config(deploy_config=None):
    """Serialize the deployment config to XML and re-parse it as an xmlobject."""
    serialized = etree.tostring(dump_zstack_deployment_config(deploy_config))
    return xmlobject.loads(serialized)
def convert(self, req):
    # Handle a V2V convert request: mount the target NFS storage on the
    # source libvirt host, block-copy every (non-filtered) volume of the
    # source VM into it while reporting progress, then return root/data
    # volume descriptors in the response.

    def get_mount_command(cmd):
        # root mounts directly; non-root uses sudo, piping the password
        # when one was supplied.
        timeout_str = "timeout 30"
        username = getUsername(cmd.libvirtURI)
        if username == 'root':
            return "{0} mount".format(timeout_str)
        if cmd.sshPassword:
            return "echo {0} | {1} sudo -S mount".format(cmd.sshPassword, timeout_str)
        return "{0} sudo mount".format(timeout_str)

    def validate_and_make_dir(_dir):
        # Returns whether the dir already existed (used for task dedup).
        exists = os.path.exists(_dir)
        if not exists:
            linux.mkdir(_dir)
        return exists

    def do_ssh_mount(cmd, local_mount_point, vm_v2v_dir, real_storage_path):
        # Mount the NFS export on the remote libvirt host, retrying with
        # NFSv3 and remounting on "Stale file handle".
        mount_cmd = get_mount_command(cmd)
        mount_paths = "{}:{} {}".format(cmd.managementIp, real_storage_path, local_mount_point)
        alternative_mount = mount_cmd + " -o vers=3"
        with lock.NamedLock(local_mount_point):
            cmdstr = "mkdir -p {0} && ls {1} 2>/dev/null || {2} {3} || {4} {3}".format(
                    local_mount_point, vm_v2v_dir, mount_cmd, mount_paths, alternative_mount)
            try:
                runSshCmd(cmd.libvirtURI, cmd.sshPrivKey, cmdstr)
            except shell.ShellError as ex:
                if "Stale file handle" in str(ex):
                    cmdstr = "umount {0} && {1} {2} || {3} {2}".format(
                            local_mount_point, mount_cmd, mount_paths, alternative_mount)
                    runSshCmd(cmd.libvirtURI, cmd.sshPrivKey, cmdstr)

    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    real_storage_path = getRealStoragePath(cmd.storagePath)
    storage_dir = os.path.join(real_storage_path, cmd.srcVmUuid)
    rsp = ConvertRsp()
    # If an earlier task for the same storage dir is still owned by this
    # agent process, just wait for it instead of starting a second copy.
    last_task = self.load_and_save_task(req, rsp, validate_and_make_dir, storage_dir)
    if last_task and last_task.agent_pid == os.getpid():
        rsp = self.wait_task_complete(last_task)
        return jsonobject.dumps(rsp)
    local_mount_point = os.path.join("/tmp/zs-v2v/", cmd.managementIp)
    vm_v2v_dir = os.path.join(local_mount_point, cmd.srcVmUuid)
    libvirtHost = getHostname(cmd.libvirtURI)
    try:
        do_ssh_mount(cmd, local_mount_point, vm_v2v_dir, real_storage_path)
    except shell.ShellError as ex:
        logger.info(str(ex))
        raise Exception('host {} cannot access NFS on {}'.format(libvirtHost, cmd.managementIp))
    # Apply traffic shaping toward the management IP when a route exists.
    if linux.find_route_interface_by_destination_ip(linux.get_host_by_name(cmd.managementIp)):
        cmdstr = "tc filter replace dev %s protocol ip parent 1: prio 1 u32 match ip src %s/32 flowid 1:1" \
                % (QOS_IFB, cmd.managementIp)
        shell.run(cmdstr)
    volumes = None
    filters = buildFilterDict(cmd.volumeFilters)
    startTime = time.time()
    with LibvirtConn(cmd.libvirtURI, cmd.saslUser, cmd.saslPass, cmd.sshPrivKey) as c:
        dom = c.lookupByUUIDString(cmd.srcVmUuid)
        if not dom:
            raise Exception('VM not found: {}'.format(cmd.srcVmUuid))
        xmlDesc = dom.XMLDesc(0)
        dxml = xmlobject.loads(xmlDesc)
        # firmware='efi' or an explicit <loader> element means UEFI boot.
        if dxml.os.hasattr('firmware_') and dxml.os.firmware_ == 'efi' or dxml.os.hasattr('loader'):
            rsp.bootMode = 'UEFI'
        volumes = filter(lambda v: not skipVolume(filters, v.name), getVolumes(dom, dxml))
        oldstat, _ = dom.state()
        needResume = True
        if cmd.pauseVm and oldstat != libvirt.VIR_DOMAIN_PAUSED:
            dom.suspend()
            needResume = False
        # libvirt >= 3.7.0 ?
        flags = 0 if c.getLibVersion() < getVerNumber(3, 7) else libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB
        # Without the transient-job flag, blockCopy requires a transient
        # domain: undefine now and re-define afterwards.
        needDefine = False
        if flags == 0 and dom.isPersistent():
            dom.undefine()
            needDefine = True
        for v in volumes:
            localpath = os.path.join(storage_dir, v.name)
            info = dom.blockJobInfo(v.name, 0)
            # Stale target file without an active job: start over.
            if os.path.exists(localpath) and not info:
                os.remove(localpath)
            if not os.path.exists(localpath) and info:
                raise Exception("blockjob already exists on disk: "+v.name)
            if info:
                continue
            logger.info("start copying {}/{} ...".format(cmd.srcVmUuid, v.name))
            # c.f. https://github.com/OpenNebula/one/issues/2646
            linux.touch_file(localpath)
            dom.blockCopy(v.name, "<disk type='file'><source file='{}'/><driver type='{}'/></disk>".format(os.path.join(vm_v2v_dir, v.name), cmd.format), None, flags)
        end_progress = 60
        total_volume_size = sum(volume.size for volume in volumes)
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl
        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "KVM-V2V"
        # Poll all block jobs every 5s, reporting size-weighted progress
        # scaled into the 0..end_progress range.
        while True:
            current_progress = 0.0
            job_canceled = False
            for v in volumes:
                if v.endTime:
                    current_progress += 1.0 * float(v.size) / float(total_volume_size)
                    continue
                info = dom.blockJobInfo(v.name, 0)
                if not info:
                    err_msg = 'blockjob not found on disk %s, maybe job has been canceled' % v.name
                    logger.warn(err_msg)
                    job_canceled = True
                    continue
                end = info['end']
                cur = info['cur']
                if cur == end :
                    v.endTime = time.time()
                    logger.info("completed copying {}/{} ...".format(cmd.srcVmUuid, v.name))
                    progress = 1.0
                else:
                    progress = float(cur) / float(end)
                current_progress += progress * float(v.size) / float(total_volume_size)
            report.progress_report(str(int(current_progress * float(end_progress))), "start")
            if all(map(lambda v: v.endTime, volumes)) or job_canceled:
                break
            time.sleep(5)
        if job_canceled:
            rsp.success = False
            rsp.error = "cannot find blockjob on vm %s, maybe it has been canceled" % cmd.srcVmUuid
        # NOTE(review): this suspends (not resumes) when pauseVm is off —
        # presumably to freeze the disks before aborting the jobs; confirm.
        if not cmd.pauseVm and oldstat != libvirt.VIR_DOMAIN_PAUSED:
            dom.suspend()
            needResume = True
        try:
            for v in volumes:
                if dom.blockJobInfo(v.name, 0):
                    dom.blockJobAbort(v.name)
        finally:
            if needResume:
                dom.resume()
            if needDefine:
                c.defineXML(xmlDesc)
    # TODO
    # - monitor progress
    def makeVolumeInfo(v, startTime, devId):
        # Response descriptor for one copied volume.
        return { "installPath": os.path.join(storage_dir, v.name),
                 "actualSize": v.physicalSize,
                 "virtualSize": v.size,
                 "virtioScsi": v.bus == 'scsi',
                 "deviceName": v.name,
                 "downloadTime": int(v.endTime - startTime),
                 "deviceId": devId }
    if not rsp.success:
        return jsonobject.dumps(rsp)
    # Root volume gets deviceId 0; data volumes are numbered from 1.
    idx = 1
    rv, dvs = None, []
    for v in volumes:
        if v.type == 'ROOT':
            rv = makeVolumeInfo(v, startTime, 0)
        else:
            dvs.append(makeVolumeInfo(v, startTime, idx))
            idx += 1
    rsp.rootVolumeInfo = rv
    rsp.dataVolumeInfos = dvs
    return jsonobject.dumps(rsp)
def _get_host_cpu_model(conn):
    """Read the host CPU model name from the libvirt capabilities XML."""
    capabilities = conn.getCapabilities()
    parsed = xmlobject.loads(capabilities)
    return str(parsed.host.cpu.model.text_)
def recur_parse(test_case_list):
    # Parse a test-suite XML config, registering each <suite> (and its
    # setup/teardown/regular cases) on self, then recurse into every
    # <import>-ed config file.
    suiteroot = os.path.dirname(test_case_list)
    def full_path(path):
        # Resolve a config-relative path against the suite root directory.
        if not path.startswith('/'):
            path = os.path.join(suiteroot, path)
        return path
    def initialize_case_result(suite_repeat, case_repeat):
        # One None result slot per (suite repeat, case repeat) combination.
        case_result = []
        result = []
        for i in range(case_repeat):
            case_result.append(None)
        for i in range(suite_repeat):
            result.append(list(case_result))
        return result
    def add_cases_to_suite(xmlobject, s, suite):
        # Register setup case, regular cases and teardown case of suite
        # element `s`, updating the shared counters on self.
        if s.setupCase__:
            setupcase = TestCase()
            setupcase.path = full_path(s.setupCase__)
            setupcase.name = setupcase.path.split('/')[-1].split('.')[0]
            setupcase.suite = suite
            setupcase.type = TestCase.SETUP_CASE
            setupcase.success = initialize_case_result(suite.repeat, setupcase.repeat)
            self.total_case_num += 1
            suite.setup_case = setupcase
            setupcase.id = self.total_case_num
            self.all_cases[setupcase.id] = setupcase
            suite.cases.append(setupcase)
            suite.total_case_num += 1
            # Track the widest case name for report formatting.
            if len(setupcase.name) > self.case_name_max_len:
                self.case_name_max_len = len(setupcase.name)
        if xmlobject.has_element(s, self.CASE_TAG):
            for c in s.get_child_node_as_list(self.CASE_TAG):
                case = TestCase()
                case.name = c.name__
                case.timeout = c.timeout__
                case.path = full_path(c.text_)
                if c.noparallel__ and c.noparallel__ != 'False':
                    case.parallel = False
                if not case.name:
                    #only keep 1 level folder info for case name
                    case.name = '/'.join(case.path.split('/')[-2:])[:-3]
                case.suite = suite
                case_name_len = len(case.name)
                if (c.repeat__ and c.repeat__.isdigit() and (string.atoi(c.repeat__) > 0)):
                    case.repeat = string.atoi(c.repeat__)
                    if case.repeat > 1:
                        import math
                        # case name will be increased due to add '.num'
                        case_name_len = case_name_len + 1 + int(math.log(case.repeat, 10)) + 1
                        if case_name_len > self.case_name_max_len:
                            self.case_name_max_len = case_name_len
                        self.info('\t\tRun [%s] times for Case: [%s]' % (case.repeat, case.name))
                        # c_repeat = 2
                        # while (c_repeat <= case.repeat):
                        #    r_case = copy.deepcopy(case)
                        #    r_case.name = case.name + '.'+ str(c_repeat)
                        #    c_repeat += 1
                        #    suite.cases.append(r_case)
                        #    self.total_case_num += 1
                        #    r_case.id = self.total_case_num
                        #    if len(r_case.name) > self.case_name_max_len:
                        #        self.case_name_max_len = len(r_case.name)
                        #    self.all_cases[case.id] = r_case
                case.success = initialize_case_result(suite.repeat, case.repeat)
                suite.cases.append(case)
                suite.total_case_num += case.repeat
                self.total_case_num += 1
                #Allow same test cases be executed multi times.
                case.id = self.total_case_num
                self.all_cases[case.id] = case
        if s.teardownCase__:
            teardowncase = TestCase()
            teardowncase.path = full_path(s.teardownCase__)
            teardowncase.name = teardowncase.path.split('/')[-1].split('.')[0]
            teardowncase.suite = suite
            teardowncase.type = TestCase.TEARDOWN_CASE
            teardowncase.success = initialize_case_result(suite.repeat, teardowncase.repeat)
            suite.teardown_case = teardowncase
            self.total_case_num += 1
            teardowncase.id = self.total_case_num
            self.all_cases[teardowncase.id] = teardowncase
            suite.cases.append(teardowncase)
            suite.total_case_num += 1
            if len(teardowncase.name) > self.case_name_max_len:
                self.case_name_max_len = len(teardowncase.name)
        if s.config__:
            suite.test_config = full_path(s.config__)
    self.info('discovering test cases in %s ...' % test_case_list)
    with open(test_case_list, 'r') as fd:
        xmlstr = fd.read()
    xo = xmlobject.loads(xmlstr)
    if xo.get_tag() != self.INTEGRATION_TEST_TAG:
        raise TestError('configuration must start with tag <%s>' % self.INTEGRATION_TEST_TAG)
    if xmlobject.has_element(xo, self.SUITE_TAG):
        for s in xo.get_child_node_as_list(self.SUITE_TAG):
            suite = TestSuite()
            suite.name = s.name_.replace(' ', '_')
            suite.id = self.suite_num
            self.suite_num += 1
            suite.root_path = os.path.dirname(test_case_list)
            suite.timeout = s.timeout__
            if (s.parallel__ and s.parallel__.isdigit() \
                    and (string.atoi(s.parallel__) > 1)):
                suite.parallel = string.atoi(s.parallel__)
            if (s.repeat__ and s.repeat__.isdigit() \
                    and (string.atoi(s.repeat__) > 1)):
                suite.repeat = string.atoi(s.repeat__)
                self.info('\tSuite [%s] will run [%s] times:' \
                        % (suite.name, suite.repeat))
                # repeat = 2
                # while (repeat <= suite.repeat):
                #    r_suite = copy.deepcopy(suite)
                #    r_suite.name = suite.name + '.' + str(repeat)
                #    r_suite.id = suite_id
                #    suite_id += 1
                #    if (suite.setup_case):
                #        self.total_case_num += 1
                #    if (suite.teardown_case):
                #        self.total_case_num += 1
                #    repeat += 1
                #    add_cases_to_suite(xmlobject, s, r_suite)
                #    self.suites[r_suite.id] = r_suite
            add_cases_to_suite(xmlobject, s, suite)
            self.suites[suite.id] = suite
    if xmlobject.has_element(xo, self.IMPORT_TAG):
        for i in xo.get_child_node_as_list(self.IMPORT_TAG):
            path = full_path(i.path_)
            if not os.path.exists(path):
                raise TestError('unable to find test configuration file at %s, imported config[%s]' % (path, i.path_))
            # Recurse into the imported configuration file.
            recur_parse(path)
def from_StartVmCmd(cmd):
    # Build a libvirt domain XML document for a StartVm command using small
    # section builders, then wrap it in a Vm object.
    use_virtio = cmd.useVirtio
    # Shared registry so each builder can find 'root' / 'devices'.
    elements = {}
    def make_root():
        root = etree.Element('domain')
        root.set('type', 'kvm')
        #self._root.set('type', 'qemu')
        root.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
        elements['root'] = root
    def make_cpu():
        root = elements['root']
        e(root, 'vcpu', str(cmd.cpuNum), {'placement':'static'})
        tune = e(root, 'cputune')
        e(tune, 'shares', str(cmd.cpuSpeed * cmd.cpuNum))
    def make_memory():
        root = elements['root']
        # cmd.memory is in bytes; libvirt wants KiB here.
        mem = cmd.memory / 1024
        e(root, 'memory', str(mem), {'unit':'k'})
        e(root, 'currentMemory', str(mem), {'unit':'k'})
    def make_os():
        root = elements['root']
        os = e(root, 'os')
        e(os, 'type', 'hvm')
        e(os, 'boot', None, {'dev':cmd.bootDev})
    def make_features():
        root = elements['root']
        features = e(root, 'features')
        for f in ['acpi', 'apic', 'pae']:
            e(features, f)
    def make_devices():
        root = elements['root']
        devices = e(root, 'devices')
        e(devices, 'emulator', kvmagent.get_qemu_path())
        e(devices, 'input', None, {'type':'tablet', 'bus':'usb'})
        elements['devices'] = devices
    def make_cdrom():
        if not cmd.isoPath__:
            return
        devices = elements['devices']
        cdrom = e(devices, 'disk', None, {'type':'file', 'device':'cdrom'})
        e(cdrom, 'driver', None, {'name':'qemu', 'type':'raw'})
        e(cdrom, 'source', None, {'file':cmd.isoPath})
        e(cdrom, 'target', None, {'dev':'hdc', 'bus':'ide'})
        e(cdrom, 'readonly', None)
    def make_volumes():
        devices = elements['devices']
        volumes = [cmd.rootVolume]
        volumes.extend(cmd.dataVolumes)
        for v in volumes:
            # Device letters limit the number of attachable disks.
            if v.deviceId >= len(Vm.DEVICE_LETTERS):
                err = "%s exceeds max disk limit, it's %s but only 26 allowed" % v.deviceId
                logger.warn(err)
                raise kvmagent.KvmError(err)
            dev_letter = Vm.DEVICE_LETTERS[v.deviceId]
            disk = e(devices, 'disk', None, {'type':'file', 'device':'disk', 'snapshot':'external'})
            e(disk, 'driver', None, {'name':'qemu', 'type':'qcow2', 'cache':'none'})
            e(disk, 'source', None, {'file':v.installPath})
            if use_virtio:
                e(disk, 'target', None, {'dev':'vd%s' % dev_letter, 'bus':'virtio'})
            else:
                e(disk, 'target', None, {'dev':'hd%s' % dev_letter, 'bus':'ide'})
            #self._e(disk, 'target', None, {'dev':'vd%s' % dev_letter, 'bus':'ide'})
    def make_nics():
        if not cmd.nics:
            return
        devices = elements['devices']
        for nic in cmd.nics:
            interface = e(devices, 'interface', None, {'type':'bridge'})
            e(interface, 'mac', None, {'address':nic.mac})
            e(interface, 'rom', None, {'bar':'off'})
            e(interface, 'source', None, {'bridge':nic.bridgeName})
            if use_virtio:
                e(interface, 'model', None, {'type':'virtio'})
            else:
                e(interface, 'model', None, {'type':'e1000'})
            e(interface, 'target', None, {'dev':nic.nicInternalName})
            #self._e(interface, 'model', None, {'type':'e1000'})
    def make_meta():
        root = elements['root']
        # Domain <name> carries the zstack VM uuid; <description> the
        # human-readable VM name.
        e(root, 'name', cmd.vmInstanceUuid)
        e(root, 'uuid', uuidhelper.to_full_uuid(cmd.vmInstanceUuid))
        e(root, 'description', cmd.vmName)
        e(root, 'clock', None, {'offset':'utc'})
        e(root, 'on_poweroff', 'destroy')
        e(root, 'on_crash', 'restart')
        e(root, 'on_reboot', 'restart')
        meta = e(root, 'metadata')
        e(meta, 'zstack', 'True')
        e(meta, 'internalId', str(cmd.vmInternalId))
    def make_vnc():
        devices = elements['devices']
        vnc = e(devices, 'graphics', None, {'type':'vnc', 'port':'5900', 'autoport':'yes'})
        e(vnc, "listen", None, {'type':'address', 'address':'0.0.0.0'})
    def make_addons():
        if not cmd.addons:
            return
        devices = elements['devices']
        channel = cmd.addons['channel']
        if channel:
            basedir = os.path.dirname(channel.socketPath)
            linux.mkdir(basedir, 0777)
            chan = e(devices, 'channel', None, {'type':'unix'})
            e(chan, 'source', None, {'mode':'bind', 'path':channel.socketPath})
            e(chan, 'target', None, {'type':'virtio', 'name':channel.targetName})
    # Order matters: root first, devices before anything that adds devices.
    make_root()
    make_meta()
    make_cpu()
    make_memory()
    make_os()
    make_features()
    make_devices()
    make_nics()
    make_volumes()
    make_cdrom()
    make_vnc()
    make_addons()
    root = elements['root']
    xml = etree.tostring(root)
    vm = Vm()
    vm.uuid = cmd.vmInstanceUuid
    vm.domain_xml = xml
    vm.domain_xmlobject = xmlobject.loads(xml)
    return vm
teardowncase.id = self.total_case_num self.all_cases[teardowncase.id] = teardowncase suite.cases.append(teardowncase) suite.total_case_num += 1 if len(teardowncase.name) > self.case_name_max_len: self.case_name_max_len = len(teardowncase.name) if s.config__: suite.test_config = full_path(s.config__) self.info('discovering test cases in %s ...' % test_case_list) with open(test_case_list, 'r') as fd: xmlstr = fd.read() xo = xmlobject.loads(xmlstr) if xo.get_tag() != self.INTEGRATION_TEST_TAG: raise TestError('configuration must start with tag <%s>' % self.INTEGRATION_TEST_TAG) if xmlobject.has_element(xo, self.SUITE_TAG): for s in xo.get_child_node_as_list(self.SUITE_TAG): suite = TestSuite() suite.name = s.name_.replace(' ', '_') if s.hasattr('path_'): suite.path = s.path_ suite.id = self.suite_num self.suite_num += 1 suite.root_path = os.path.dirname(test_case_list) suite.timeout = s.timeout__
def from_StartVmCmd(cmd):
    # Build a libvirt domain XML document for a StartVm command using small
    # section builders, then wrap it in a Vm object.
    use_virtio = cmd.useVirtio
    # Shared registry so each builder can find 'root' / 'devices'.
    elements = {}
    def make_root():
        root = etree.Element('domain')
        root.set('type', 'kvm')
        #self._root.set('type', 'qemu')
        root.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
        elements['root'] = root
    def make_cpu():
        root = elements['root']
        e(root, 'vcpu', str(cmd.cpuNum), {'placement': 'static'})
        tune = e(root, 'cputune')
        e(tune, 'shares', str(cmd.cpuSpeed * cmd.cpuNum))
    def make_memory():
        root = elements['root']
        # cmd.memory is in bytes; libvirt wants KiB here.
        mem = cmd.memory / 1024
        e(root, 'memory', str(mem), {'unit': 'k'})
        e(root, 'currentMemory', str(mem), {'unit': 'k'})
    def make_os():
        root = elements['root']
        os = e(root, 'os')
        e(os, 'type', 'hvm')
        e(os, 'boot', None, {'dev': cmd.bootDev})
    def make_features():
        root = elements['root']
        features = e(root, 'features')
        for f in ['acpi', 'apic', 'pae']:
            e(features, f)
    def make_devices():
        root = elements['root']
        devices = e(root, 'devices')
        e(devices, 'emulator', kvmagent.get_qemu_path())
        e(devices, 'input', None, {'type': 'tablet', 'bus': 'usb'})
        elements['devices'] = devices
    def make_cdrom():
        if not cmd.isoPath__:
            return
        devices = elements['devices']
        cdrom = e(devices, 'disk', None, {
            'type': 'file',
            'device': 'cdrom'
        })
        e(cdrom, 'driver', None, {'name': 'qemu', 'type': 'raw'})
        e(cdrom, 'source', None, {'file': cmd.isoPath})
        e(cdrom, 'target', None, {'dev': 'hdc', 'bus': 'ide'})
        e(cdrom, 'readonly', None)
    def make_volumes():
        devices = elements['devices']
        volumes = [cmd.rootVolume]
        volumes.extend(cmd.dataVolumes)
        for v in volumes:
            # Device letters limit the number of attachable disks.
            if v.deviceId >= len(Vm.DEVICE_LETTERS):
                err = "%s exceeds max disk limit, it's %s but only 26 allowed" % v.deviceId
                logger.warn(err)
                raise kvmagent.KvmError(err)
            dev_letter = Vm.DEVICE_LETTERS[v.deviceId]
            disk = e(devices, 'disk', None, {
                'type': 'file',
                'device': 'disk',
                'snapshot': 'external'
            })
            e(disk, 'driver', None, {
                'name': 'qemu',
                'type': 'qcow2',
                'cache': 'none'
            })
            e(disk, 'source', None, {'file': v.installPath})
            if use_virtio:
                e(disk, 'target', None, {
                    'dev': 'vd%s' % dev_letter,
                    'bus': 'virtio'
                })
            else:
                e(disk, 'target', None, {
                    'dev': 'hd%s' % dev_letter,
                    'bus': 'ide'
                })
            #self._e(disk, 'target', None, {'dev':'vd%s' % dev_letter, 'bus':'ide'})
    def make_nics():
        if not cmd.nics:
            return
        devices = elements['devices']
        for nic in cmd.nics:
            interface = e(devices, 'interface', None, {'type': 'bridge'})
            e(interface, 'mac', None, {'address': nic.mac})
            e(interface, 'rom', None, {'bar': 'off'})
            e(interface, 'source', None, {'bridge': nic.bridgeName})
            if use_virtio:
                e(interface, 'model', None, {'type': 'virtio'})
            else:
                e(interface, 'model', None, {'type': 'e1000'})
            e(interface, 'target', None, {'dev': nic.nicInternalName})
            #self._e(interface, 'model', None, {'type':'e1000'})
    def make_meta():
        root = elements['root']
        # Domain <name> carries the zstack VM uuid; <description> the
        # human-readable VM name.
        e(root, 'name', cmd.vmInstanceUuid)
        e(root, 'uuid', uuidhelper.to_full_uuid(cmd.vmInstanceUuid))
        e(root, 'description', cmd.vmName)
        e(root, 'clock', None, {'offset': 'utc'})
        e(root, 'on_poweroff', 'destroy')
        e(root, 'on_crash', 'restart')
        e(root, 'on_reboot', 'restart')
        meta = e(root, 'metadata')
        e(meta, 'zstack', 'True')
        e(meta, 'internalId', str(cmd.vmInternalId))
    def make_vnc():
        devices = elements['devices']
        vnc = e(devices, 'graphics', None, {
            'type': 'vnc',
            'port': '5900',
            'autoport': 'yes'
        })
        e(vnc, "listen", None, {'type': 'address', 'address': '0.0.0.0'})
    def make_addons():
        if not cmd.addons:
            return
        devices = elements['devices']
        channel = cmd.addons['channel']
        if channel:
            basedir = os.path.dirname(channel.socketPath)
            linux.mkdir(basedir, 0777)
            chan = e(devices, 'channel', None, {'type': 'unix'})
            e(chan, 'source', None, {
                'mode': 'bind',
                'path': channel.socketPath
            })
            e(chan, 'target', None, {
                'type': 'virtio',
                'name': channel.targetName
            })
    # Order matters: root first, devices before anything that adds devices.
    make_root()
    make_meta()
    make_cpu()
    make_memory()
    make_os()
    make_features()
    make_devices()
    make_nics()
    make_volumes()
    make_cdrom()
    make_vnc()
    make_addons()
    root = elements['root']
    xml = etree.tostring(root)
    vm = Vm()
    vm.uuid = cmd.vmInstanceUuid
    vm.domain_xml = xml
    vm.domain_xmlobject = xmlobject.loads(xml)
    return vm
def export_zstack_deployment_config(deploy_config = None):
    """Dump the deployment config to an XML tree and parse it back as an xmlobject."""
    dumped = dump_zstack_deployment_config(deploy_config)
    return xmlobject.loads(etree.tostring(dumped))