def add_virtual_router(deployConfig, session_uuid, l3_name=None, zone_name=None):
    if not xmlobject.has_element(deployConfig, 'instanceOfferings.virtualRouterOffering'):
        return

    for i in xmlobject.safe_list(deployConfig.instanceOfferings.virtualRouterOffering):
        if l3_name and l3_name != i.managementL3NetworkRef.text_:
            continue

        if zone_name and zone_name != i.zoneRef.text_:
            continue

        print "add virtual router offering, l3_name: %s; zone_name: %s" % (l3_name, zone_name)
        action = api_actions.CreateVirtualRouterOfferingAction()
        action.sessionUuid = session_uuid
        action.name = i.name_
        action.description = i.description__
        action.cpuNum = i.cpuNum_
        action.cpuSpeed = i.cpuSpeed_
        if i.memorySize__:
            action.memorySize = sizeunit.get_size(i.memorySize_)
        elif i.memoryCapacity_:
            action.memorySize = sizeunit.get_size(i.memoryCapacity_)
        action.isDefault = i.isDefault__
        action.type = 'VirtualRouter'

        zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=i.zoneRef.text_)
        zinv = get_first_item_from_list(zinvs, 'zone', i.zoneRef.text_, 'virtual router offering')
        action.zoneUuid = zinv.uuid

        cond = res_ops.gen_query_conditions('zoneUuid', '=', zinv.uuid)
        cond1 = res_ops.gen_query_conditions('name', '=', i.managementL3NetworkRef.text_, cond)
        minvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, session_uuid)
        minv = get_first_item_from_list(minvs, 'Management L3 Network', i.managementL3NetworkRef.text_, 'virtualRouterOffering')
        action.managementNetworkUuid = minv.uuid

        if xmlobject.has_element(i, 'publicL3NetworkRef'):
            cond1 = res_ops.gen_query_conditions('name', '=', i.publicL3NetworkRef.text_, cond)
            pinvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, session_uuid)
            pinv = get_first_item_from_list(pinvs, 'Public L3 Network', i.publicL3NetworkRef.text_, 'virtualRouterOffering')
            action.publicNetworkUuid = pinv.uuid

        iinvs = res_ops.get_resource(res_ops.IMAGE, session_uuid, name=i.imageRef.text_)
        iinv = get_first_item_from_list(iinvs, 'Image', i.imageRef.text_, 'virtualRouterOffering')
        action.imageUuid = iinv.uuid

        thread = threading.Thread(target=_thread_for_action, args=(action, ))
        wait_for_thread_queue()
        thread.start()

    wait_for_thread_done()
def add_backup_storage(deployConfig, session_uuid): if xmlobject.has_element(deployConfig, 'backupStorages.sftpBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.sftpBackupStorage): action = api_actions.AddSftpBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.url = bs.url_ action.username = bs.username_ action.password = bs.password_ action.hostname = bs.hostname_ action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution action.type = inventory.SFTP_BACKUP_STORAGE_TYPE if bs.uuid__: action.resourceUuid = bs.uuid__ thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() if xmlobject.has_element(deployConfig, 'backupStorages.simulatorBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.simulatorBackupStorage): action = api_actions.AddSimulatorBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.url = bs.url_ action.type = inventory.SIMULATOR_BACKUP_STORAGE_TYPE action.totalCapacity = sizeunit.get_size(bs.totalCapacity_) action.availableCapacity = sizeunit.get_size(bs.availableCapacity_) thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() wait_for_thread_done()
def add_storage_for_backup(deployConfig):
    print "try to add backup storage"
    if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'):
        print "find image store backup storage"
        for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage):
            if hasattr(bs, 'local_backup_storage_'):
                print "find local_backup_storage"
                cond = res_ops.gen_query_conditions('tag', '=', "allowbackup")
                tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
                if len(tags) > 0:
                    print "local backup storage already exists"
                    break
                cond = res_ops.gen_query_conditions('name', '=', bs.name_)
                bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
                print bss
                add_local_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid, 'allowbackup')

    if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'):
        for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage):
            if hasattr(bs, 'remote_backup_storage_'):
                print "find remote_backup_storage"
                cond = res_ops.gen_query_conditions('tag', '=', "remotebackup")
                tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
                if len(tags) > 0:
                    print "remote backup storage already exists"
                    break
                cond = res_ops.gen_query_conditions('name', '=', bs.name_)
                bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
                print bss
                add_remote_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid, 'remotebackup')
def _add_cluster(action, zone_ref, cluster, cluster_ref): evt = action.run() deploy_logger(jsonobject.dumps(evt)) cinv = evt.inventory try: if xmlobject.has_element(cluster, 'primaryStorageRef'): for pref in xmlobject.safe_list(cluster.primaryStorageRef): ps_name = generate_dup_name(generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c') pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name) pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster') action_ps = api_actions.AttachPrimaryStorageToClusterAction() action_ps.sessionUuid = session_uuid action_ps.clusterUuid = cinv.uuid action_ps.primaryStorageUuid = pinv.uuid evt = action_ps.run() deploy_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if cluster.allL2NetworkRef__ == 'true': # find all L2 network in zone and attach to cluster cond = res_ops.gen_query_conditions('zoneUuid', '=', action.zoneUuid) l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK, cond, session_uuid) l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK, [{'name': 'zoneUuid', 'op': '=', 'value': action.zoneUuid}], session_uuid, ['uuid'], 0, l2_count) else: l2invs = [] if xmlobject.has_element(cluster, 'l2NetworkRef'): for l2ref in xmlobject.safe_list(cluster.l2NetworkRef): l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c') cond = res_ops.gen_query_conditions('zoneUuid', '=', action.zoneUuid) cond = res_ops.gen_query_conditions('name', '=', l2_name, cond) l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK, cond, session_uuid, ['uuid']) if not l2inv: raise DeployError("Can't find l2 network [%s] in database." % l2_name) l2invs.extend(l2inv) for l2inv in l2invs: action = api_actions.AttachL2NetworkToClusterAction() action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.l2NetworkUuid = l2inv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start()
def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid): action = api_actions.CreateL3NetworkAction() action.sessionUuid = session_uuid action.description = l3.description__ if l3.system__ and l3.system__ != 'False': action.system = 'true' action.l2NetworkUuid = l2inv_uuid action.name = l3Name if l3.uuid__: action.resourceUuid = l3.uuid__ action.type = inventory.L3_BASIC_NETWORK_TYPE if l3.domain_name__: action.dnsDomain = l3.domain_name__ try: evt = action.run() except: exc_info.append(sys.exc_info()) deploy_logger(jsonobject.dumps(evt)) l3_inv = evt.inventory # add dns if xmlobject.has_element(l3, 'dns'): for dns in xmlobject.safe_list(l3.dns): action = api_actions.AddDnsToL3NetworkAction() action.sessionUuid = session_uuid action.dns = dns.text_ action.l3NetworkUuid = l3_inv.uuid try: evt = action.run() except: exc_info.append(sys.exc_info()) deploy_logger(jsonobject.dumps(evt)) # add ip range. if xmlobject.has_element(l3, 'ipRange'): do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid) # add network service. providers = {} action = api_actions.QueryNetworkServiceProviderAction() action.sessionUuid = session_uuid action.conditions = [] try: reply = action.run() except: exc_info.append(sys.exc_info()) for pinv in reply: providers[pinv.name] = pinv.uuid if xmlobject.has_element(l3, 'networkService'): do_add_network_service(l3.networkService, l3_inv.uuid, providers, session_uuid)
def _setting(self, options):
    def validate(restr, option_name, val, err_msg):
        p = re.compile(restr)
        if not p.search(val):
            err = "value[%s] of %s doesn't match regular expression[%s]" % (val, option_name, restr)
            if err_msg:
                err = '%s\n%s' % (err, err_msg)
            raise SettingError(err)

    def check_exists(option_name, val):
        if not os.path.exists(val):
            err = '%s of %s does not exist in the filesystem' % (val, option_name)
            raise SettingError(err)

    def write_properties_file():
        content = []
        property_file = os.path.join(self.HOME_DIR, 'zstack.properties')
        for name, value in self.settings:
            po = self.properties_map[name]
            if xmlobject.has_element(po.xmlobj, 'help'):
                wrapper = textwrap.TextWrapper()
                help = wrapper.wrap(po.xmlobj.help.text_)
                help = ['# %s' % h for h in help]
                help = [re.sub(r' +', ' ', h) for h in help]
                content.extend(help)
            content.append('%s=%s\n' % (name, value))

        if os.path.exists(property_file):
            backup_file(property_file, self.PROPERTIES_BACKUP_DIR)

        with open(property_file, 'w') as fd:
            fd.write('\n'.join(content))

        print 'wrote all settings to %s' % property_file

    option_dict = vars(options)
    for name, p in self.properties:
        s = option_dict[p.dest]
        xo = p.xmlobj
        if xmlobject.has_element(xo, 'validator'):
            restr = xo.validator.text_
            err_msg = None
            if xmlobject.has_element(xo, 'errorMessage'):
                err_msg = xo.errorMessage.text_
            validate(restr, p.option_name, s, err_msg)
        if xmlobject.has_element(xo, 'checkExists'):
            check_exists(p.option_name, s)
        self.settings.append((name, s))

    write_properties_file()
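# Illustrative sketch, not from the original source: the rough shape of the
# zstack.properties file that write_properties_file() above emits. The property
# names and values below are hypothetical; real ones come from the properties
# XML. Each wrapped <help> line becomes a '# ' comment, the entry itself is a
# 'name=value' line, and entries end up separated by blank lines because every
# value line carries its own trailing '\n' before the final '\n'.join().
#
#   # The port the management node HTTP server listens on
#   management.server.port=8080
#
#   # Root path for locally stored templates
#   image.local.path=/var/lib/zstack/images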
def add_ip_range(deployConfig, session_uuid, ip_range_name=None, zone_name=None, l3_name=None):
    """
    Called when only adding an IP range. If the IP range is defined in the L3
    config, add_l3_network() will add the IP range directly.

    deployConfig is an xmlobject. If using the standard net_operation, please
    check net_operations.add_ip_range(test_util.IpRangeOption()).
    """
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return

    l3networks = []

    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        if zone_name and zone_name != zone.name_:
            continue

        l2networks = []

        if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
        if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))

        for l2 in l2networks:
            if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
                l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))

        if zone.duplication__ == None:
            duplication = 1
        else:
            duplication = int(zone.duplication__)

        for zone_duplication in range(duplication):
            for l3 in l3networks:
                if l3_name and l3_name != l3.name_:
                    continue

                if not xmlobject.has_element(l3, 'ipRange'):
                    continue

                if zone_duplication == 0:
                    l3Name = l3.name_
                else:
                    l3Name = generate_dup_name(l3.name_, zone_duplication, 'z')

                l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name=l3Name)
                l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3Name, 'IP range')
                do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid, ip_range_name)
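# Illustrative usage sketch, not part of the original module. It shows how
# add_ip_range() is typically driven: parse the deploy XML with
# xmlobject.loads(), open an admin session, add one named range, then log out.
# acc_ops is assumed to be importable here the same way add_l2_resource() uses
# it elsewhere in this section; the file path and names are hypothetical.
def _example_add_single_ip_range(deploy_xml_path):
    with open(deploy_xml_path, 'r') as fd:
        deploy_config = xmlobject.loads(fd.read())
    session_uuid = acc_ops.login_as_admin()
    try:
        # Only the range named 'public-ip-range' on L3 network 'public-l3' is added.
        add_ip_range(deploy_config, session_uuid,
                     ip_range_name='public-ip-range', l3_name='public-l3')
    finally:
        acc_ops.logout(session_uuid)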
def add_network_service(deployConfig, session_uuid): if not xmlobject.has_element(deployConfig, "zones.zone"): return l3networks = [] for zone in xmlobject.safe_list(deployConfig.zones.zone): l2networks = [] if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork)) if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork)) for l2 in l2networks: if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'): l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork)) providers = {} action = api_actions.QueryNetworkServiceProviderAction() action.sessionUuid = session_uuid action.conditions = [] try: reply = action.run() except Exception as e: exc_info.append(sys.exc_info()) raise e for pinv in reply: providers[pinv.name] = pinv.uuid if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for zone_duplication in range(duplication): for l3 in l3networks: if not xmlobject.has_element(l3, 'networkService'): continue if zone_duplication == 0: l3_name = l3.name_ else: l3_name = generate_dup_name(l3.name_, zone_duplication, 'z') l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name=l3_name) l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3_name, 'Network Service') do_add_network_service(l3.networkService, l3_inv.uuid, providers, session_uuid)
def _deploy_cluster(zone): if not xmlobject.has_element(zone, "clusters.cluster"): return if zone.duplication__ == None: zone_duplication = 1 else: zone_duplication = int(zone.duplication__) for zone_ref in range(zone_duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): if cluster_name and cluster_name != cluster.name_: continue if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = api_actions.CreateClusterAction() action.sessionUuid = session_uuid action.name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c') action.description = generate_dup_name(generate_dup_name(cluster.description__, zone_ref, 'z'), cluster_ref, 'c') action.hypervisorType = cluster.hypervisorType_ zone_name = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name) zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster') action.zoneUuid = zinv.uuid thread = threading.Thread(target=_add_cluster, args=(action, zone_ref, cluster, cluster_ref, )) wait_for_thread_queue() thread.start()
def add_instance_offering(deployConfig, session_uuid): def _add_io(instance_offering_xml_obj, session_uuid): action = api_actions.CreateInstanceOfferingAction() action.sessionUuid = session_uuid action.name = instance_offering_xml_obj.name_ action.description = instance_offering_xml_obj.description__ action.cpuNum = instance_offering_xml_obj.cpuNum_ action.cpuSpeed = instance_offering_xml_obj.cpuSpeed_ if instance_offering_xml_obj.memorySize__: action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memorySize_) elif instance_offering_xml_obj.memoryCapacity_: action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memoryCapacity_) try: evt = action.run() test_util.test_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if not xmlobject.has_element(deployConfig, \ 'instanceOfferings.instanceOffering'): return for instance_offering_xml_obj in \ xmlobject.safe_list(deployConfig.instanceOfferings.instanceOffering): thread = threading.Thread(target = _add_io, \ args = (instance_offering_xml_obj, session_uuid, )) wait_for_thread_queue() thread.start() wait_for_thread_done()
def add_instance_offering(deployConfig, session_uuid): def _add_io(instance_offering_xml_obj, session_uuid): action = api_actions.CreateInstanceOfferingAction() action.sessionUuid = session_uuid action.name = instance_offering_xml_obj.name_ action.description = instance_offering_xml_obj.description__ action.cpuNum = instance_offering_xml_obj.cpuNum_ action.cpuSpeed = instance_offering_xml_obj.cpuSpeed_ action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memorySize_) if instance_offering_xml_obj.uuid__: action.resourceUuid = instance_offering_xml_obj.uuid__ evt = action.run() deploy_logger(jsonobject.dumps(evt)) if not xmlobject.has_element(deployConfig, 'instanceOfferings.instanceOffering'): return for instance_offering_xml_obj in \ xmlobject.safe_list(deployConfig.instanceOfferings.instanceOffering): thread = threading.Thread(target=_add_io, args=(instance_offering_xml_obj, session_uuid,)) wait_for_thread_queue() thread.start() wait_for_thread_done()
def _add_zone(zone, zone_duplication): action = api_actions.CreateZoneAction() action.sessionUuid = session_uuid if zone_duplication == 0: action.name = zone.name_ action.description = zone.description__ if zone.uuid__: action.resourceUuid = zone.uuid__ else: action.name = generate_dup_name(zone.name_, zone_duplication, 'z') action.description = generate_dup_name(zone.description__, zone_duplication, 'zone') try: evt = action.run() deploy_logger(jsonobject.dumps(evt)) zinv = evt.inventory except: exc_info.append(sys.exc_info()) if xmlobject.has_element(zone, 'backupStorageRef'): for ref in xmlobject.safe_list(zone.backupStorageRef): bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_) bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone') action = api_actions.AttachBackupStorageToZoneAction() action.sessionUuid = session_uuid action.backupStorageUuid = bs.uuid action.zoneUuid = zinv.uuid try: evt = action.run() deploy_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info())
def _deploy_l3_network(l2, zone_ref, cluster_ref): if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"): return if not l2.duplication__: l2_dup = 1 else: l2_dup = int(l2.duplication__) for l2_num in range(l2_dup): for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork): if l3_name and l3_name != l3.name_: continue l2Name = generate_dup_name( generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l3Name = generate_dup_name( generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l2invs = res_ops.get_resource(res_ops.L2_NETWORK, session_uuid, name=l2Name) l2inv = get_first_item_from_list(l2invs, 'L2 Network', l2Name, 'L3 Network') thread = threading.Thread(target=_do_l3_deploy, args=(l3, l2inv.uuid, l3Name, session_uuid,)) wait_for_thread_queue() thread.start()
def _deploy_host(cluster, zone_ref, cluster_ref): if not xmlobject.has_element(cluster, "hosts.host"): return if zone_ref == 0 and cluster_ref == 0: cluster_name = cluster.name_ else: cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c') cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name) cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network') for host in xmlobject.safe_list(cluster.hosts.host): if host_ip and host_ip != host.managementIp_: continue if host.duplication__ == None: host_duplication = 1 else: host_duplication = int(host.duplication__) for i in range(host_duplication): if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE: action = api_actions.AddKVMHostAction() action.username = host.username_ action.password = host.password_ action.timeout = AddKVMHostTimeOut if hasattr(host, 'sshPort_'): action.port = host.sshPort_ elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE: action = api_actions.AddSimulatorHostAction() if host.cpuCapacity__: action.cpuCapacity = host.cpuCapacity_ else: action.cpuCapacity = 416000 if host.memoryCapacity__: action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_) else: action.memoryCapacity = sizeunit.get_size('1024G') action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.hostTags = host.hostTags__ if zone_ref == 0 and cluster_ref == 0 and i == 0: action.name = host.name_ action.description = host.description__ action.managementIp = host.managementIp_ if host.uuid__: action.resourceUuid = host.uuid__ else: action.name = generate_dup_name( generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.description = generate_dup_name( generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i) thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start()
def _deploy_primary_storage(zone): if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddNfsPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.NFS_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid if pr.uuid__: action.resourceUuid = pr.uuid__ thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'): if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for pr in xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage): for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): for pref in xmlobject.safe_list(cluster.primaryStorageRef): if pref.text_ == pr.name_: if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref) thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start()
def skip_if_not_storage_network_separate(scenarioConfig):
    is_storage_network_separated = False
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            for l3Network in xmlobject.safe_list(vm.l3Networks.l3Network):
                if xmlobject.has_element(l3Network, 'primaryStorageRef'):
                    is_storage_network_separated = True
                    break

    if not is_storage_network_separated:
        test_util.test_skip("separate storage network not found in scenario config.")
def add_zone(deployConfig, session_uuid, zone_name = None): def _add_zone(zone, zone_duplication): action = api_actions.CreateZoneAction() action.sessionUuid = session_uuid if zone_duplication == 0: action.name = zone.name_ action.description = zone.description__ if zone.uuid__: action.resourceUuid = zone.uuid__ else: action.name = generate_dup_name(zone.name_, zone_duplication, 'z') action.description = generate_dup_name(zone.description__, zone_duplication, 'zone') try: evt = action.run() deploy_logger(jsonobject.dumps(evt)) zinv = evt.inventory except: exc_info.append(sys.exc_info()) if xmlobject.has_element(zone, 'backupStorageRef'): for ref in xmlobject.safe_list(zone.backupStorageRef): bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_) bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone') action = api_actions.AttachBackupStorageToZoneAction() action.sessionUuid = session_uuid action.backupStorageUuid = bs.uuid action.zoneUuid = zinv.uuid try: evt = action.run() deploy_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if not xmlobject.has_element(deployConfig, 'zones.zone'): return for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone_name != zone.name_: continue if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for i in range(duplication): thread = threading.Thread(target=_add_zone, args=(zone, i, )) wait_for_thread_queue() thread.start() wait_for_thread_done()
def build_property_option(xo): for p in xo.get_child_node_as_list('property'): name = p.name.text_ pairs = name.split('.') dest = '%s_%s' % (pairs[0], pairs[1]) option_name = '--%s-%s' % (pairs[0], pairs[1]) help = None if xmlobject.has_element(p, 'help'): help = p.help.text_ default = None if xmlobject.has_element(p, 'default'): default = p.default.text_ self.settting_command.add_argument(option_name, dest=dest, default=default, help=help) po = Property() po.name = name po.dest = dest po.xmlobj = p po.option_name = option_name self.properties.append((name, po)) self.properties_map[name] = po
def from_iptables_xml():
    output = shell.call('/sbin/iptables-save | /bin/iptables-xml')
    obj = xmlobject.loads(output)
    ret = IPTables()
    if not xmlobject.has_element(obj, 'table'):
        return None

    for to in obj.table:
        t = Table()
        t.table_xml_object = to
        t._parse_chains()
        ret.tables[t.name] = t

    return ret
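# Illustrative usage sketch, not part of the original module: inspecting the
# parsed firewall state. Only attributes visible above are used -- the returned
# IPTables object keeps Table objects in a dict keyed by table name; anything
# beyond that (e.g. chain contents) is left to Table's own API.
def _example_dump_iptables_tables():
    ipt = from_iptables_xml()
    if ipt is None:
        print 'no iptables table found'
        return
    for name, table in ipt.tables.items():
        # table.table_xml_object is the raw xmlobject node for this table
        print 'iptables table: %s' % name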
def deploy_2ha(scenarioConfig, scenarioFile, deployConfig): mn_ip1 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_ mn_ip2 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 1).ip_ if not xmlobject.has_element(deployConfig, 'backupStorages.miniBackupStorage'): node3_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 2).ip_ vip = os.environ['zstackHaVip'] change_ip_cmd1 = "zstack-ctl change_ip --ip=" + mn_ip1 ssh.execute(change_ip_cmd1, mn_ip1, "root", "password", False, 22) iptables_cmd1 = "iptables -I INPUT -d " + vip + " -j ACCEPT" ssh.execute(iptables_cmd1, mn_ip1, "root", "password", False, 22) change_ip_cmd2 = "zstack-ctl change_ip --ip=" + mn_ip2 ssh.execute(change_ip_cmd2, mn_ip2, "root", "password", False, 22) iptables_cmd2 = "iptables -I INPUT -d " + vip + " -j ACCEPT" ssh.execute(iptables_cmd2, mn_ip2, "root", "password", False, 22) woodpecker_vm_ip = shell.call("ip r | grep src | head -1 | awk '{print $NF}'").strip() zsha2_path = "/home/%s/zsha2" % woodpecker_vm_ip ssh.scp_file(zsha2_path, "/root/zsha2", mn_ip1, "root", "password") ssh.execute("chmod a+x /root/zsha2", mn_ip1, "root", "password", False, 22) zstack_hamon_path = "/home/%s/zstack-hamon" % woodpecker_vm_ip ssh.scp_file(zstack_hamon_path, "/root/zstack-hamon", mn_ip1, "root", "password") ssh.execute("chmod a+x /root/zstack-hamon", mn_ip1, "root", "password", False, 22) if xmlobject.has_element(deployConfig, 'backupStorages.miniBackupStorage'): cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + mn_ip2 + ',' + mn_ip2 + ' -db-root-pw zstack.mysql.password -yes' else: cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + node3_ip + ',' + mn_ip2 + ' -db-root-pw zstack.mysql.password -yes' test_util.test_logger("deploy 2ha by cmd: %s" %(cmd)) ret, output, stderr = ssh.execute(cmd, mn_ip1, "root", "password", False, 22) test_util.test_logger("cmd=%s; ret=%s; output=%s; stderr=%s" %(cmd, ret, output, stderr)) if ret!=0: test_util.test_fail("deploy 2ha failed")
def add_image(deployConfig, session_uuid): def _add_image(action): increase_image_thread() try: evt = action.run() test_util.test_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) finally: decrease_image_thread() if not xmlobject.has_element(deployConfig, 'images.image'): return for i in xmlobject.safe_list(deployConfig.images.image): for bsref in xmlobject.safe_list(i.backupStorageRef): bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=bsref.text_) bs = get_first_item_from_list(bss, 'backup storage', bsref.text_, 'image') action = api_actions.AddImageAction() action.sessionUuid = session_uuid #TODO: account uuid will be removed later. action.accountUuid = inventory.INITIAL_SYSTEM_ADMIN_UUID action.backupStorageUuids = [bs.uuid] action.bits = i.bits__ if not action.bits: action.bits = 64 action.description = i.description__ action.format = i.format_ action.mediaType = i.mediaType_ action.guestOsType = i.guestOsType__ if not action.guestOsType: action.guestOsType = 'unknown' action.platform = i.platform__ if not action.platform: action.platform = 'Linux' action.hypervisorType = i.hypervisorType__ action.name = i.name_ action.url = i.url_ action.timeout = 1800000 thread = threading.Thread(target = _add_image, args = (action, )) print 'before add image1: %s' % i.url_ wait_for_image_thread_queue() print 'before add image2: %s' % i.url_ thread.start() print 'add image: %s' % i.url_ print 'all images add command are executed' wait_for_thread_done(True) print 'all images have been added'
def setup_static_ip(scenario_file):
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    scenario_obj = xmlobject.loads(xmlstr)
    for vm in xmlobject.safe_list(scenario_obj.vms.vm):
        mnip = vm.managementIp_
        if xmlobject.has_element(vm, 'ips'):
            for ip in xmlobject.safe_list(vm.ips.ip):
                nic_ip = ip.ip_
                if nic_ip.startswith("10"):
                    if shell.run("%s %s 'ip a|grep br_zsn1'" % (ssh_cmd, mnip)) == 0:
                        nic = "br_zsn1"
                    else:
                        nic = "zsn1"
                    netmask = "255.255.255.0"
                    shell.call("%s %s zs-network-setting -i %s %s %s|exit 0" % (ssh_cmd, mnip, nic, nic_ip, netmask))
    return
def add_l2_resource(deploy_config, l2_name, zone_name = None, \ session_uuid = None): session_uuid_flag = True if not session_uuid: session_uuid = acc_ops.login_as_admin() session_uuid_flag = False try: dep_ops.add_l2_network(deploy_config, session_uuid, l2_name, \ zone_name = zone_name) l2_uuid = res_ops.get_resource(res_ops.L2_NETWORK, session_uuid, \ name = l2_name)[0].uuid for zone in xmlobject.safe_list(deploy_config.zones.zone): if zone_name and zone_name != zone.name_: continue for cluster in xmlobject.safe_list(zone.clusters.cluster): if xmlobject.has_element(cluster, 'l2NetworkRef'): for l2ref in xmlobject.safe_list(cluster.l2NetworkRef): if l2_name != l2ref.text_: continue cluster_uuid = res_ops.get_resource(res_ops.CLUSTER, \ session_uuid, name=cluster.name_)[0].uuid attach_l2(l2_uuid, cluster_uuid, session_uuid) dep_ops.add_l3_network(None, None, deploy_config, session_uuid, l2_name = l2_name, \ zone_name = zone_name) cond = res_ops.gen_query_conditions('l2NetworkUuid', '=', l2_uuid) l3_name = res_ops.query_resource(res_ops.L3_NETWORK, cond, \ session_uuid)[0].name dep_ops.add_virtual_router(None, None, deploy_config, session_uuid, \ l3_name = l3_name, zone_name = zone_name) except Exception as e: test_util.test_logger('[Error] zstack deployment meets exception when adding l2 resource .') traceback.print_exc(file=sys.stdout) raise e finally: if not session_uuid_flag: acc_ops.logout(session_uuid) test_util.action_logger('Complete add l2 resources for [uuid:] %s' \ % l2_uuid)
def write_properties_file(): content = [] property_file = os.path.join(self.HOME_DIR, 'zstack.properties') for name, value in self.settings: po = self.properties_map[name] if xmlobject.has_element(po.xmlobj, 'help'): wrapper = textwrap.TextWrapper() help = wrapper.wrap(po.xmlobj.help.text_) help = ['# %s' % h for h in help] help = [re.sub(r' +', ' ', h) for h in help] content.extend(help) content.append('%s=%s\n' % (name, value)) if os.path.exists(property_file): backup_file(property_file, self.PROPERTIES_BACKUP_DIR) with open(property_file, 'w') as fd: fd.write('\n'.join(content)) print 'wrote all settings to %s' % property_file
def get_mn_host(scenarioConfig, scenarioFile):
    mn_host_list = []
    test_util.test_logger("@@DEBUG@@:<scenarioConfig:%s><scenarioFile:%s><scenarioFile exists: %s>"
                          % (str(scenarioConfig), str(scenarioFile), str(os.path.exists(scenarioFile))))
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return mn_host_list

    test_util.test_logger("@@DEBUG@@: after config file exist check")
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            if xmlobject.has_element(vm, 'mnHostRef'):
                with open(scenarioFile, 'r') as fd:
                    xmlstr = fd.read()
                scenario_file = xmlobject.loads(xmlstr)
                for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
                    if s_vm.name_ == vm.name_:
                        mn_host_list.append(s_vm)

    test_util.test_logger("@@DEBUG@@: %s" % (str(mn_host_list)))
    return mn_host_list
def add_disk_offering(deployConfig, session_uuid): def _add_disk_offering(disk_offering_xml_obj, session_uuid): action = api_actions.CreateDiskOfferingAction() action.sessionUuid = session_uuid action.name = disk_offering_xml_obj.name_ action.description = disk_offering_xml_obj.description_ action.diskSize = sizeunit.get_size(disk_offering_xml_obj.diskSize_) try: evt = action.run() test_util.test_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if not xmlobject.has_element(deployConfig, 'diskOfferings.diskOffering'): return for disk_offering_xml_obj in \ xmlobject.safe_list(deployConfig.diskOfferings.diskOffering): thread = threading.Thread(target = _add_disk_offering, \ args = (disk_offering_xml_obj, session_uuid)) wait_for_thread_queue() thread.start() wait_for_thread_done()
def add_disk_offering(deployConfig, session_uuid): def _add_disk_offering(disk_offering_xml_obj, session_uuid): action = api_actions.CreateDiskOfferingAction() action.sessionUuid = session_uuid action.name = disk_offering_xml_obj.name_ action.description = disk_offering_xml_obj.description_ action.diskSize = sizeunit.get_size(disk_offering_xml_obj.diskSize_) if disk_offering_xml_obj.uuid__: action.resourceUuid = disk_offering_xml_obj.uuid__ evt = action.run() dinv = evt.inventory deploy_logger(jsonobject.dumps(evt)) if not xmlobject.has_element(deployConfig, 'diskOfferings.diskOffering'): return for disk_offering_xml_obj in \ xmlobject.safe_list(deployConfig.diskOfferings.diskOffering): thread = threading.Thread(target=_add_disk_offering, args=(disk_offering_xml_obj, session_uuid)) wait_for_thread_queue() thread.start() wait_for_thread_done()
def add_host(deployConfig, session_uuid, host_ip=None, zone_name=None, cluster_name=None):
    """
    Add hosts based on an xml deploy config object. If zone_name, cluster_name
    or host_ip is given, this function will only add the related hosts.
    """
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return

    def _deploy_host(cluster, zone_ref, cluster_ref):
        if not xmlobject.has_element(cluster, "hosts.host"):
            return

        if zone_ref == 0 and cluster_ref == 0:
            cluster_name = cluster.name_
        else:
            cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')

        cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name)
        cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network')

        for host in xmlobject.safe_list(cluster.hosts.host):
            if host_ip and host_ip != host.managementIp_:
                continue

            if host.duplication__ == None:
                host_duplication = 1
            else:
                host_duplication = int(host.duplication__)

            for i in range(host_duplication):
                if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE:
                    action = api_actions.AddKVMHostAction()
                    action.username = host.username_
                    action.password = host.password_
                    action.timeout = AddKVMHostTimeOut
                    if hasattr(host, 'sshPort_'):
                        action.port = host.sshPort_
                elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE:
                    action = api_actions.AddSimulatorHostAction()
                    if host.cpuCapacity__:
                        action.cpuCapacity = host.cpuCapacity_
                    else:
                        action.cpuCapacity = 416000
                    if host.memoryCapacity__:
                        action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_)
                    else:
                        action.memoryCapacity = sizeunit.get_size('1024G')

                action.sessionUuid = session_uuid
                action.clusterUuid = cinv.uuid
                action.hostTags = host.hostTags__

                if zone_ref == 0 and cluster_ref == 0 and i == 0:
                    action.name = host.name_
                    action.description = host.description__
                    action.managementIp = host.managementIp_
                    if host.uuid__:
                        action.resourceUuid = host.uuid__
                else:
                    action.name = generate_dup_name(
                        generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
                    action.description = generate_dup_name(
                        generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
                    action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i)

                thread = threading.Thread(target=_thread_for_action, args=(action,))
                wait_for_thread_queue()
                thread.start()

    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        if zone_name and zone_name != zone.name_:
            continue

        if not xmlobject.has_element(zone, 'clusters.cluster'):
            continue

        if zone.duplication__ == None:
            zone_duplication = 1
        else:
            zone_duplication = int(zone.duplication__)

        for zone_ref in range(zone_duplication):
            for cluster in xmlobject.safe_list(zone.clusters.cluster):
                if cluster_name and cluster_name != cluster.name_:
                    continue

                if cluster.duplication__ == None:
                    cluster_duplication = 1
                else:
                    cluster_duplication = int(cluster.duplication__)

                for cluster_ref in range(cluster_duplication):
                    _deploy_host(cluster, zone_ref, cluster_ref)

    wait_for_thread_done()
    deploy_logger('All add KVM host actions are done.')
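# Illustrative usage sketch, not part of the original module: adding only the
# host with a given management IP from an already parsed deploy config. The
# session handling mirrors add_l2_resource() elsewhere in this section; the IP
# address and names are hypothetical.
def _example_add_single_host(deploy_config):
    session_uuid = acc_ops.login_as_admin()
    try:
        add_host(deploy_config, session_uuid,
                 host_ip='192.168.0.101',
                 zone_name='zone1', cluster_name='cluster1')
    finally:
        acc_ops.logout(session_uuid)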
def add_l2_network(deployConfig, session_uuid, l2_name = None, zone_name = None): ''' If providing name, it will only add L2 network with the same name. ''' if not xmlobject.has_element(deployConfig, "zones.zone"): return def _deploy_l2_network(zone, is_vlan): if is_vlan: if not xmlobject.has_element(zone, "l2Networks.l2VlanNetwork"): return l2Network = zone.l2Networks.l2VlanNetwork else: if not xmlobject.has_element(zone, \ "l2Networks.l2NoVlanNetwork"): return l2Network = zone.l2Networks.l2NoVlanNetwork if zone.duplication__ == None: zone_dup = 1 else: zone_dup = int(zone.duplication__) for zone_ref in range(zone_dup): zoneName = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zoneName) zinv = get_first_item_from_list(zinvs, 'Zone', zoneName, 'L2 network') #can only deal with single cluster duplication case. cluster = xmlobject.safe_list(zone.clusters.cluster)[0] if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): for l2 in xmlobject.safe_list(l2Network): if l2_name and l2_name != l2.name_: continue if not is_vlan or l2.duplication__ == None: l2_dup = 1 else: l2_dup = int(l2.duplication__) for j in range(l2_dup): l2Name = generate_dup_name(\ generate_dup_name(\ generate_dup_name(\ l2.name_, zone_ref, 'z')\ , cluster_ref, 'c')\ , j, 'n') l2Des = generate_dup_name(\ generate_dup_name(\ generate_dup_name(\ l2.description_, zone_ref, 'z')\ , cluster_ref, 'c')\ , j, 'n') if is_vlan: l2_vlan = int(l2.vlan_) + j if is_vlan: action = api_actions.CreateL2VlanNetworkAction() else: action = api_actions.CreateL2NoVlanNetworkAction() action.sessionUuid = session_uuid action.name = l2Name action.description = l2Des action.physicalInterface = l2.physicalInterface_ action.zoneUuid = zinv.uuid if is_vlan: action.vlan = l2_vlan thread = threading.Thread(\ target=_thread_for_action, \ args=(action,)) wait_for_thread_queue() thread.start() for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone.name_ != zone_name: continue _deploy_l2_network(zone, False) _deploy_l2_network(zone, True) wait_for_thread_done()
def _deploy_l2_network(zone, is_vlan): if is_vlan: if not xmlobject.has_element(zone, "l2Networks.l2VlanNetwork"): return l2Network = zone.l2Networks.l2VlanNetwork else: if not xmlobject.has_element(zone, "l2Networks.l2NoVlanNetwork"): return l2Network = zone.l2Networks.l2NoVlanNetwork if zone.duplication__ == None: zone_dup = 1 else: zone_dup = int(zone.duplication__) for zone_ref in range(zone_dup): zoneName = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zoneName) zinv = get_first_item_from_list(zinvs, 'Zone', zoneName, 'L2 network') # can only deal with single cluster duplication case. cluster = xmlobject.safe_list(zone.clusters.cluster)[0] if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): for l2 in xmlobject.safe_list(l2Network): if l2_name and l2_name != l2.name_: continue if not is_vlan or l2.duplication__ == None: l2_dup = 1 else: l2_dup = int(l2.duplication__) for j in range(l2_dup): l2Name = generate_dup_name( generate_dup_name( generate_dup_name( l2.name_, zone_ref, 'z') , cluster_ref, 'c') , j, 'n') if not l2.description__: l2.description_ = 'l2' l2Des = generate_dup_name( generate_dup_name( generate_dup_name( l2.description_, zone_ref, 'z') , cluster_ref, 'c') , j, 'n') if is_vlan: l2_vlan = int(l2.vlan_) + j if is_vlan: action = api_actions.CreateL2VlanNetworkAction() else: action = api_actions.CreateL2NoVlanNetworkAction() action.sessionUuid = session_uuid action.name = l2Name action.description = l2Des action.physicalInterface = l2.physicalInterface_ action.zoneUuid = zinv.uuid if is_vlan: action.vlan = l2_vlan if l2.uuid__: action.resourceUuid = l2.uuid__ thread = threading.Thread( target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start()
def add_primary_storage(deployConfig, session_uuid, ps_name = None, \ zone_name = None): if not xmlobject.has_element(deployConfig, 'zones.zone'): deploy_logger('Not find zones.zone in config, skip primary storage deployment') return def _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref): if zone_ref == 0: zone_name = zone.name_ else: zone_name = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name) zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'primary storage') action = api_actions.AddSimulatorPrimaryStorageAction() action.sessionUuid = session_uuid action.name = generate_dup_name(generate_dup_name(pr.name_, zone_ref, 'z'), cluster_ref, 'c') action.description = generate_dup_name(generate_dup_name(pr.description__, zone_ref, 'zone'), cluster_ref, 'cluster') action.url = generate_dup_name(generate_dup_name(pr.url_, zone_ref, 'z'), cluster_ref, 'c') action.type = inventory.SIMULATOR_PRIMARY_STORAGE_TYPE action.zoneUuid = zinv.uuid action.totalCapacity = sizeunit.get_size(pr.totalCapacity_) action.availableCapacity = sizeunit.get_size(pr.availableCapacity_) if pr.uuid__: action.resourceUuid = pr.uuid__ return action def _deploy_primary_storage(zone): if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddNfsPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.NFS_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid if pr.uuid__: action.resourceUuid = pr.uuid__ thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'): if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for pr in xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage): for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): for pref in xmlobject.safe_list(cluster.primaryStorageRef): if pref.text_ == pr.name_: if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref) thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone.name_ != zone_name: continue _deploy_primary_storage(zone) wait_for_thread_done()
self.case_name_max_len = len(teardowncase.name) if s.config__: suite.test_config = full_path(s.config__) self.info('discovering test cases in %s ...' % test_case_list) with open(test_case_list, 'r') as fd: xmlstr = fd.read() xo = xmlobject.loads(xmlstr) if xo.get_tag() != self.INTEGRATION_TEST_TAG: raise TestError('configuration must start with tag <%s>' % self.INTEGRATION_TEST_TAG) if xmlobject.has_element(xo, self.SUITE_TAG): for s in xo.get_child_node_as_list(self.SUITE_TAG): suite = TestSuite() suite.name = s.name_.replace(' ', '_') if s.hasattr('path_'): suite.path = s.path_ suite.id = self.suite_num self.suite_num += 1 suite.root_path = os.path.dirname(test_case_list) suite.timeout = s.timeout__ if (s.parallel__ and s.parallel__.isdigit() \ and (string.atoi(s.parallel__) > 1)): suite.parallel = string.atoi(s.parallel__) if (s.repeat__ and s.repeat__.isdigit() \
def add_primary_storage(deployConfig, session_uuid, ps_name = None, \ zone_name = None): if not xmlobject.has_element(deployConfig, 'zones.zone'): test_util.test_logger('Not find zones.zone in config, skip primary storage deployment') return def _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref): if zone_ref == 0: zone_name = zone.name_ else: zone_name = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name) zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'primary storage') action = api_actions.AddSimulatorPrimaryStorageAction() action.sessionUuid = session_uuid action.name = generate_dup_name(generate_dup_name(pr.name_, zone_ref, 'z'), cluster_ref, 'c') action.description = generate_dup_name(generate_dup_name(pr.description__, zone_ref, 'zone'), cluster_ref, 'cluster') action.url = generate_dup_name(generate_dup_name(pr.url_, zone_ref, 'z'), cluster_ref, 'c') action.type = inventory.SIMULATOR_PRIMARY_STORAGE_TYPE action.zoneUuid = zinv.uuid action.totalCapacity = sizeunit.get_size(pr.totalCapacity_) action.availableCapacity = sizeunit.get_size(pr.availableCapacity_) return action def _deploy_primary_storage(zone): if xmlobject.has_element(zone, 'primaryStorages.IscsiFileSystemBackendPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.IscsiFileSystemBackendPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddIscsiFileSystemBackendPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.ISCSI_FILE_SYSTEM_BACKEND_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid action.chapPassword = pr.chapPassword_ action.chapUsername = pr.chapUsername_ action.sshPassword = pr.sshPassword_ action.sshUsername = pr.sshUsername_ action.hostname = pr.hostname_ action.filesystemType = pr.filesystemType_ thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.localPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.localPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddLocalPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.LOCAL_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.cephPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.cephPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddCephPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.CEPH_PRIMARY_STORAGE_TYPE action.monUrls = pr.monUrls_.split(';') if pr.dataVolumePoolName__: action.dataVolumePoolName = pr.dataVolumePoolName__ 
if pr.rootVolumePoolName__: action.rootVolumePoolName = pr.rootVolumePoolName__ if pr.imageCachePoolName__: action.imageCachePoolName = pr.imageCachePoolName__ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddNfsPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.NFS_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'): if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for pr in xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage): for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): for pref in xmlobject.safe_list(cluster.primaryStorageRef): if pref.text_ == pr.name_: if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref) thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.sharedMountPointPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.sharedMountPointPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddSharedMountPointPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone.name_ != zone_name: continue _deploy_primary_storage(zone) wait_for_thread_done()
def _deploy_host(cluster, zone_ref, cluster_ref): if not xmlobject.has_element(cluster, "hosts.host"): return if zone_ref == 0 and cluster_ref == 0: cluster_name = cluster.name_ else: cluster_name = generate_dup_name( generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c') cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name) cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network') for host in xmlobject.safe_list(cluster.hosts.host): if host_ip and host_ip != host.managementIp_: continue if host.duplication__ == None: host_duplication = 1 else: host_duplication = int(host.duplication__) for i in range(host_duplication): if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE: action = api_actions.AddKVMHostAction() action.username = host.username_ action.password = host.password_ action.timeout = AddKVMHostTimeOut elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE: action = api_actions.AddSimulatorHostAction() if host.cpuCapacity__: action.cpuCapacity = host.cpuCapacity_ else: action.cpuCapacity = 416000 if host.memoryCapacity__: action.memoryCapacity = sizeunit.get_size( host.memoryCapacity_) else: action.memoryCapacity = sizeunit.get_size('1024G') action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.hostTags = host.hostTags__ if zone_ref == 0 and cluster_ref == 0 and i == 0: action.name = host.name_ action.description = host.description__ action.managementIp = host.managementIp_ if host.uuid__: action.resourceUuid = host.uuid__ else: action.name = generate_dup_name( generate_dup_name( generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.description = generate_dup_name( generate_dup_name( generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.managementIp = generate_dup_host_ip( host.managementIp_, zone_ref, cluster_ref, i) thread = threading.Thread(target=_thread_for_action, args=(action, )) wait_for_thread_queue() thread.start()
def add_backup_storage(deployConfig, session_uuid): if xmlobject.has_element(deployConfig, 'backupStorages.sftpBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.sftpBackupStorage): action = api_actions.AddSftpBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.url = bs.url_ action.username = bs.username_ action.password = bs.password_ action.hostname = bs.hostname_ if hasattr(bs, 'port_'): action.port = bs.port_ action.sshport = bs.port_ action.sshPort = bs.port_ action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution action.type = inventory.SFTP_BACKUP_STORAGE_TYPE thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage): action = api_actions.AddImageStoreBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.url = bs.url_ action.username = bs.username_ action.password = bs.password_ action.hostname = bs.hostname_ if hasattr(bs, 'port_'): action.port = bs.port_ action.sshport = bs.port_ action.sshPort = bs.port_ action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution action.type = inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() if xmlobject.has_element(deployConfig, 'backupStorages.cephBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.cephBackupStorage): action = api_actions.AddCephBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.monUrls = bs.monUrls_.split(';') if bs.poolName__: action.poolName = bs.poolName_ action.timeout = AddKVMHostTimeOut #for some platform slowly salt execution action.type = inventory.CEPH_BACKUP_STORAGE_TYPE thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() if xmlobject.has_element(deployConfig, 'backupStorages.simulatorBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.simulatorBackupStorage): action = api_actions.AddSimulatorBackupStorageAction() action.sessionUuid = session_uuid action.name = bs.name_ action.description = bs.description__ action.url = bs.url_ action.type = inventory.SIMULATOR_BACKUP_STORAGE_TYPE action.totalCapacity = sizeunit.get_size(bs.totalCapacity_) action.availableCapacity = sizeunit.get_size(bs.availableCapacity_) thread = threading.Thread(target = _thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() wait_for_thread_done()
def _add_cluster(action, zone_ref, cluster, cluster_ref): evt = action.run() deploy_logger(jsonobject.dumps(evt)) cinv = evt.inventory try: if xmlobject.has_element(cluster, 'primaryStorageRef'): for pref in xmlobject.safe_list(cluster.primaryStorageRef): ps_name = generate_dup_name( generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c') pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name) pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster') action_ps = api_actions.AttachPrimaryStorageToClusterAction( ) action_ps.sessionUuid = session_uuid action_ps.clusterUuid = cinv.uuid action_ps.primaryStorageUuid = pinv.uuid evt = action_ps.run() deploy_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if cluster.allL2NetworkRef__ == 'true': #find all L2 network in zone and attach to cluster cond = res_ops.gen_query_conditions('zoneUuid', '=', \ action.zoneUuid) l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK, \ cond, session_uuid) l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK, \ [{'name':'zoneUuid', 'op':'=', 'value':action.zoneUuid}], \ session_uuid, ['uuid'], 0, l2_count) else: l2invs = [] if xmlobject.has_element(cluster, 'l2NetworkRef'): for l2ref in xmlobject.safe_list(cluster.l2NetworkRef): l2_name = generate_dup_name( generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c') cond = res_ops.gen_query_conditions('zoneUuid', '=', \ action.zoneUuid) cond = res_ops.gen_query_conditions('name', '=', l2_name, \ cond) l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK, \ cond, session_uuid, ['uuid']) if not l2inv: raise DeployError( "Can't find l2 network [%s] in database." % l2_name) l2invs.extend(l2inv) for l2inv in l2invs: action = api_actions.AttachL2NetworkToClusterAction() action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.l2NetworkUuid = l2inv.uuid thread = threading.Thread(target=_thread_for_action, args=(action, )) wait_for_thread_queue() thread.start()
def add_l3_network(deployConfig, session_uuid, l3_name = None, l2_name = None, \ zone_name = None): ''' add_l3_network will add L3 network and also add related DNS, IpRange and network services. ''' if not xmlobject.has_element(deployConfig, "zones.zone"): return def _deploy_l3_network(l2, zone_ref, cluster_ref): if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"): return if not l2.duplication__: l2_dup = 1 else: l2_dup = int(l2.duplication__) for l2_num in range(l2_dup): for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork): if l3_name and l3_name != l3.name_: continue l2Name = generate_dup_name(generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l3Name = generate_dup_name(generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l2invs = res_ops.get_resource(res_ops.L2_NETWORK, \ session_uuid, \ name=l2Name) l2inv = get_first_item_from_list(l2invs, \ 'L2 Network', l2Name, 'L3 Network') thread = threading.Thread(target=_do_l3_deploy, \ args=(l3, l2inv.uuid, l3Name, session_uuid, )) wait_for_thread_queue() thread.start() def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid): action = api_actions.CreateL3NetworkAction() action.sessionUuid = session_uuid action.description = l3.description__ if l3.system__ and l3.system__ != 'False': action.system = 'true' action.l2NetworkUuid = l2inv_uuid action.name = l3Name action.type = inventory.L3_BASIC_NETWORK_TYPE if l3.domain_name__: action.dnsDomain = l3.domain_name__ try: evt = action.run() except: exc_info.append(sys.exc_info()) test_util.test_logger(jsonobject.dumps(evt)) l3_inv = evt.inventory #add dns if xmlobject.has_element(l3, 'dns'): for dns in xmlobject.safe_list(l3.dns): action = api_actions.AddDnsToL3NetworkAction() action.sessionUuid = session_uuid action.dns = dns.text_ action.l3NetworkUuid = l3_inv.uuid try: evt = action.run() except: exc_info.append(sys.exc_info()) test_util.test_logger(jsonobject.dumps(evt)) #add ip range. if xmlobject.has_element(l3, 'ipRange'): do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid) #add network service. providers = {} action = api_actions.QueryNetworkServiceProviderAction() action.sessionUuid = session_uuid action.conditions = [] try: reply = action.run() except: exc_info.append(sys.exc_info()) for pinv in reply: providers[pinv.name] = pinv.uuid if xmlobject.has_element(l3, 'networkService'): do_add_network_service(l3.networkService, l3_inv.uuid, \ providers, session_uuid) for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone_name != zone.name_: continue l2networks = [] if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork)) if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork)) for l2 in l2networks: if l2_name and l2_name != l2.name_: continue if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): if zone_ref == 1 and cluster_ref == 1: zone_ref = 0 cluster_ref = 0 _deploy_l3_network(l2, zone_ref, cluster_ref) wait_for_thread_done() test_util.test_logger('All add L3 Network actions are done.')
def add_host(deployConfig, session_uuid, host_ip = None, zone_name = None, \ cluster_name = None): ''' Based on an xml deploy config object, add hosts. If zone_name, cluster_name or host_ip is given, this function will only add the related hosts. ''' if not xmlobject.has_element(deployConfig, "zones.zone"): return def _deploy_host(cluster, zone_ref, cluster_ref): if not xmlobject.has_element(cluster, "hosts.host"): return if zone_ref == 0 and cluster_ref == 0: cluster_name = cluster.name_ else: cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c') cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name) cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network') for host in xmlobject.safe_list(cluster.hosts.host): if host_ip and host_ip != host.managementIp_: continue if host.duplication__ == None: host_duplication = 1 else: host_duplication = int(host.duplication__) for i in range(host_duplication): if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE: action = api_actions.AddKVMHostAction() action.username = host.username_ action.password = host.password_ if hasattr(host, 'port_'): action.port = host.port_ action.sshport = host.port_ action.sshPort = host.port_ action.timeout = AddKVMHostTimeOut elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE: action = api_actions.AddSimulatorHostAction() action.cpuCapacity = host.cpuCapacity_ action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_) action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.hostTags = host.hostTags__ if zone_ref == 0 and cluster_ref == 0 and i == 0: action.name = host.name_ action.description = host.description__ action.managementIp = host.managementIp_ else: action.name = generate_dup_name(generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.description = generate_dup_name(generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h') action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i) thread = threading.Thread(target=_thread_for_action, args = (action, )) wait_for_thread_queue() thread.start() for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone_name != zone.name_: continue if not xmlobject.has_element(zone, 'clusters.cluster'): continue if zone.duplication__ == None: zone_duplication = 1 else: zone_duplication = int(zone.duplication__) for zone_ref in range(zone_duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): if cluster_name and cluster_name != cluster.name_: continue if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): _deploy_host(cluster, zone_ref, cluster_ref) wait_for_thread_done() test_util.test_logger('All add KVM host actions are done.')
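# NOTE (editor): with duplication enabled, add_host() issues
# zone_duplication * cluster_duplication * host_duplication AddKVMHostAction calls per <host>
# element. A tiny, purely illustrative check of that arithmetic (values are made up):
example_zone_dup = 2
example_cluster_dup = 3
example_host_dup = 2
assert example_zone_dup * example_cluster_dup * example_host_dup == 12  # 12 hosts added per entry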
def add_l3_network(deployConfig, session_uuid, l3_name=None, l2_name=None, zone_name=None): ''' add_l3_network will add L3 network and also add related DNS, IpRange and network services. ''' if not xmlobject.has_element(deployConfig, "zones.zone"): return def _deploy_l3_network(l2, zone_ref, cluster_ref): if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"): return if not l2.duplication__: l2_dup = 1 else: l2_dup = int(l2.duplication__) for l2_num in range(l2_dup): for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork): if l3_name and l3_name != l3.name_: continue l2Name = generate_dup_name( generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l3Name = generate_dup_name( generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n') l2invs = res_ops.get_resource(res_ops.L2_NETWORK, session_uuid, name=l2Name) l2inv = get_first_item_from_list(l2invs, 'L2 Network', l2Name, 'L3 Network') thread = threading.Thread(target=_do_l3_deploy, args=(l3, l2inv.uuid, l3Name, session_uuid,)) wait_for_thread_queue() thread.start() def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid): action = api_actions.CreateL3NetworkAction() action.sessionUuid = session_uuid action.description = l3.description__ if l3.system__ and l3.system__ != 'False': action.system = 'true' action.l2NetworkUuid = l2inv_uuid action.name = l3Name if l3.uuid__: action.resourceUuid = l3.uuid__ action.type = inventory.L3_BASIC_NETWORK_TYPE if l3.domain_name__: action.dnsDomain = l3.domain_name__ try: evt = action.run() except: exc_info.append(sys.exc_info()) deploy_logger(jsonobject.dumps(evt)) l3_inv = evt.inventory # add dns if xmlobject.has_element(l3, 'dns'): for dns in xmlobject.safe_list(l3.dns): action = api_actions.AddDnsToL3NetworkAction() action.sessionUuid = session_uuid action.dns = dns.text_ action.l3NetworkUuid = l3_inv.uuid try: evt = action.run() except: exc_info.append(sys.exc_info()) deploy_logger(jsonobject.dumps(evt)) # add ip range. if xmlobject.has_element(l3, 'ipRange'): do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid) # add network service. providers = {} action = api_actions.QueryNetworkServiceProviderAction() action.sessionUuid = session_uuid action.conditions = [] try: reply = action.run() except: exc_info.append(sys.exc_info()) for pinv in reply: providers[pinv.name] = pinv.uuid if xmlobject.has_element(l3, 'networkService'): do_add_network_service(l3.networkService, l3_inv.uuid, providers, session_uuid) for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone_name != zone.name_: continue l2networks = [] if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork)) if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'): l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork)) for l2 in l2networks: if l2_name and l2_name != l2.name_: continue if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) if duplication == 1: _deploy_l3_network(l2, 0, 0) else: for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): _deploy_l3_network(l2, zone_ref, cluster_ref) wait_for_thread_done() deploy_logger('All add L3 Network actions are done.')
def add_virtual_router(deployConfig, session_uuid, l3_name = None, \ zone_name = None): if not xmlobject.has_element(deployConfig, 'instanceOfferings.virtualRouterOffering'): return for i in xmlobject.safe_list( deployConfig.instanceOfferings.virtualRouterOffering): if l3_name and l3_name != i.managementL3NetworkRef.text_: continue if zone_name and zone_name != i.zoneRef.text_: continue action = api_actions.CreateVirtualRouterOfferingAction() action.sessionUuid = session_uuid action.name = i.name_ action.description = i.description__ action.cpuNum = i.cpuNum_ action.cpuSpeed = i.cpuSpeed_ action.memorySize = sizeunit.get_size(i.memorySize_) action.isDefault = i.isDefault__ action.type = 'VirtualRouter' if i.uuid__: action.resourceUuid = i.uuid__ zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=i.zoneRef.text_) zinv = get_first_item_from_list(zinvs, 'zone', i.zoneRef.text_, 'virtual router offering') action.zoneUuid = zinv.uuid cond = res_ops.gen_query_conditions('zoneUuid', '=', zinv.uuid) cond1 = res_ops.gen_query_conditions('name', '=', \ i.managementL3NetworkRef.text_, cond) minvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \ session_uuid) minv = get_first_item_from_list(minvs, 'Management L3 Network', i.managementL3NetworkRef.text_, 'virtualRouterOffering') action.managementNetworkUuid = minv.uuid if xmlobject.has_element(i, 'publicL3NetworkRef'): cond1 = res_ops.gen_query_conditions('name', '=', \ i.publicL3NetworkRef.text_, cond) pinvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \ session_uuid) pinv = get_first_item_from_list(pinvs, 'Public L3 Network', i.publicL3NetworkRef.text_, 'virtualRouterOffering') action.publicNetworkUuid = pinv.uuid iinvs = res_ops.get_resource(res_ops.IMAGE, session_uuid, \ name=i.imageRef.text_) iinv = get_first_item_from_list(iinvs, 'Image', i.imageRef.text_, 'virtualRouterOffering') action.imageUuid = iinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action, )) wait_for_thread_queue() thread.start() wait_for_thread_done()
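# NOTE (editor): add_virtual_router() narrows its L3-network lookups by chaining query conditions:
# the zoneUuid condition list is passed back in as the base for the name condition. A minimal sketch
# of that chaining, assuming res_ops.gen_query_conditions(name, op, value, cond=None) appends a
# {'name': ..., 'op': ..., 'value': ...} dict to the given list (illustrative, not the project's
# actual implementation):
def gen_query_conditions_sketch(name, op, value, cond=None):
    conditions = list(cond) if cond else []
    conditions.append({'name': name, 'op': op, 'value': value})
    return conditions

zone_cond = gen_query_conditions_sketch('zoneUuid', '=', 'zone-uuid-1')
mgmt_cond = gen_query_conditions_sketch('name', '=', 'management-l3', zone_cond)
# mgmt_cond now carries both conditions, so the query only matches the management L3 network
# inside that specific zone.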
def check_deployed_vcenter(deploy_config, scenario_config=None, scenario_file=None): vc_name = os.environ.get('vcenter') vslist = {} if xmlobject.has_element(deploy_config, 'vcenter.datacenters.datacenter'): assert deploy_config.vcenter.name_ == vc_ops.lib_get_vcenter_by_name( vc_name).name for datacenter in xmlobject.safe_list( deploy_config.vcenter.datacenters.datacenter): dportgroup_list = [] if xmlobject.has_element(datacenter, 'dswitch'): for dswitch in xmlobject.safe_list(datacenter.dswitch): for dportgroup in xmlobject.safe_list( dswitch.dportgroups.dportgroup): dportgroup_list.append(dportgroup.name_) for cluster in xmlobject.safe_list(datacenter.clusters.cluster): sign = None assert cluster.name_ == vc_ops.lib_get_vcenter_cluster_by_name( cluster.name_).name for host in xmlobject.safe_list(cluster.hosts.host): vslist[host.name_] = {'vSwitch0': ['VM Network.0']} managementIp = dep_ops.get_host_from_scenario_file( host.name_, scenario_config, scenario_file, deploy_config) assert managementIp == vc_ops.lib_get_vcenter_host_by_ip( managementIp).name assert vc_ops.lib_get_vcenter_host_by_ip( managementIp).hypervisorType == "ESX" if xmlobject.has_element(host, "iScsiStorage.vmfsdatastore"): assert host.iScsiStorage.vmfsdatastore.name_ == vc_ops.lib_get_vcenter_primary_storage_by_name( host.iScsiStorage.vmfsdatastore.name_).name assert vc_ops.lib_get_vcenter_primary_storage_by_name( host.iScsiStorage.vmfsdatastore.name_ ).type == "VCenter" assert host.iScsiStorage.vmfsdatastore.name_ == vc_ops.lib_get_vcenter_backup_storage_by_name( host.iScsiStorage.vmfsdatastore.name_).name assert vc_ops.lib_get_vcenter_backup_storage_by_name( host.iScsiStorage.vmfsdatastore.name_ ).type == "VCenter" if xmlobject.has_element(host, "vswitchs"): for vswitch in xmlobject.safe_list(host.vswitchs.vswitch): if vswitch.name_ == "vSwitch0": for port_group in xmlobject.safe_list( vswitch.portgroup): vslist[host.name_]['vSwitch0'].append( port_group.text_ + '.' + port_group.vlanId_) else: vslist[host.name_][vswitch.name_] = [] for port_group in xmlobject.safe_list( vswitch.portgroup): vslist[host.name_][vswitch.name_].append( port_group.text_ + '.' 
+ port_group.vlanId_) if xmlobject.has_element(host, "dswitchRef"): sign = 1 for vm in xmlobject.safe_list(host.vms.vm): assert vm.name_ == vc_ops.lib_get_vm_by_name(vm.name_).name assert vc_ops.lib_get_vm_by_name( vm.name_).hypervisorType == "ESX" assert vc_ops.lib_get_vm_by_name( vm.name_).state == "Running" if xmlobject.has_element(cluster, "templates"): for template in xmlobject.safe_list( cluster.templates.template): templ_name = template.path_ tp_name = templ_name.split('/')[-1].split('.')[0] assert tp_name == vc_ops.lib_get_root_image_by_name( tp_name).name for dportgroup_name in dportgroup_list: if sign: assert dportgroup_name == vc_ops.lib_get_vcenter_l2_by_name( dportgroup_name).name assert "L3-" + dportgroup_name == vc_ops.lib_get_vcenter_l3_by_name( "L3-" + dportgroup_name).name cluster_list = vc_ops.lib_get_vcenter_l2_by_name( dportgroup_name).attachedClusterUuids if vc_ops.lib_get_vcenter_cluster_by_name( cluster.name_).uuid not in cluster_list: test_util.test_fail("dpg not sync success") else: if vc_ops.lib_get_vcenter_l2_by_name(dportgroup_name): cluster_list = vc_ops.lib_get_vcenter_l2_by_name( dportgroup_name).attachedClusterUuids if vc_ops.lib_get_vcenter_cluster_by_name( cluster.name_).uuid in cluster_list: test_util.test_fail("dpg not sync success") else: assert vc_ops.lib_get_vcenter_l2_by_name( dportgroup_name) == None assert vc_ops.lib_get_vcenter_l3_by_name( "L3-" + dportgroup_name) == None pg_list, vlan_list, non_pg_list, non_vlan_list = get_pgs(vslist) for pg in pg_list: assert pg == vc_ops.lib_get_vcenter_l2_by_name(pg).name assert "L3-" + pg == vc_ops.lib_get_vcenter_l3_by_name("L3-" + pg).name cluster_list = vc_ops.lib_get_vcenter_l2_by_name( pg).attachedClusterUuids if vc_ops.lib_get_vcenter_cluster_by_name( cluster.name_).uuid not in cluster_list: test_util.test_fail("pg not sync success") for non_pg in non_pg_list: if vc_ops.lib_get_vcenter_l2_by_name(non_pg): cluster_list = vc_ops.lib_get_vcenter_l2_by_name( non_pg).attachedClusterUuids if vc_ops.lib_get_vcenter_cluster_by_name( cluster.name_).uuid in cluster_list: test_util.test_fail("pg not sync success") else: assert vc_ops.lib_get_vcenter_l2_by_name(non_pg) == None assert vc_ops.lib_get_vcenter_l3_by_name("L3-" + non_pg) == None
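# NOTE (editor): check_deployed_vcenter() accumulates the expected port groups per host into vslist
# before calling get_pgs(). The structure it builds looks roughly like the dict below (host name,
# vswitch names and '<portGroup>.<vlanId>' entries are purely illustrative); get_pgs() is assumed
# to split this into port groups that should and should not have been synced, which drives the
# assertions above.
example_vslist = {
    'esx-host-1': {
        'vSwitch0': ['VM Network.0', 'pg-a.100'],
        'vSwitch1': ['pg-b.200'],
    },
}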
def deploy_scenario(scenario_config, scenario_file, deploy_config): vm_inv_lst = [] vm_cfg_lst = [] eip_lst = [] vip_lst = [] ocfs2smp_shareable_volume_is_created = False zstack_management_ip = scenario_config.basicConfig.zstackManagementIp.text_ root_xml = etree.Element("deployerConfig") vms_xml = etree.SubElement(root_xml, 'vms') for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host): for vm in xmlobject.safe_list(host.vms.vm): vm_creation_option = test_util.VmOption() l3_uuid_list = [] default_l3_uuid = None for l3network in xmlobject.safe_list(vm.l3Networks.l3Network): if not default_l3_uuid: default_l3_uuid = l3network.uuid_ l3_uuid_list.append(l3network.uuid_) vm_creation_option.set_instance_offering_uuid(vm.vmInstranceOfferingUuid_) vm_creation_option.set_l3_uuids(l3_uuid_list) vm_creation_option.set_image_uuid(vm.imageUuid_) vm_creation_option.set_name(vm.name_) vm_creation_option.set_host_uuid(host.uuid_) #vm_creation_option.set_data_disk_uuids(disk_offering_uuids) #vm_creation_option.set_default_l3_uuid(default_l3_uuid) #vm_creation_option.set_system_tags(system_tags) #vm_creation_option.set_ps_uuid(ps_uuid) #vm_creation_option.set_session_uuid(session_uuid) vm_inv = create_vm(zstack_management_ip, vm_creation_option) vm_ip = test_lib.lib_get_vm_nic_by_l3(vm_inv, default_l3_uuid).ip test_lib.lib_wait_target_up(vm_ip, '22', 120) vm_xml = etree.SubElement(vms_xml, 'vm') vm_xml.set('name', vm.name_) vm_xml.set('uuid', vm_inv.uuid) vm_xml.set('ip', vm_ip) setup_vm_no_password(vm_inv, vm, deploy_config) setup_vm_console(vm_inv, vm, deploy_config) stop_vm(zstack_management_ip, vm_inv.uuid) start_vm(zstack_management_ip, vm_inv.uuid) test_lib.lib_wait_target_up(vm_ip, '22', 120) ips_xml = etree.SubElement(vm_xml, 'ips') for l3_uuid in l3_uuid_list: ip_xml = etree.SubElement(ips_xml, 'ip') ip = test_lib.lib_get_vm_nic_by_l3(vm_inv, l3_uuid).ip ip_xml.set('ip', ip) #setup eip if xmlobject.has_element(vm, 'eipRef'): vm_nic = vm_inv.vm.vmNics[0] vm_nic_uuid = vm_nic.uuid for l3network in xmlobject.safe_list(vm.l3Networks.l3Network): vip = test_stub.create_vip('scenario-auto-vip', l3network.uuid_) vip_lst.append(vip) eip = test_stub.create_eip(l3network.eipRef.text_, vip_uuid=vip.get_vip().uuid, vnic_uuid=vm_nic_uuid, vm_obj=vm_inv) eip_lst.append(eip) vip.attach_eip(eip) vm_xml.set('ip', eip.get_eip().vipIp) if xmlobject.has_element(vm, 'nodeRef'): setup_node_vm(vm_inv, vm, deploy_config) if xmlobject.has_element(vm, 'hostRef'): setup_host_vm(vm_inv, vm, deploy_config) vm_inv_lst.append(vm_inv) vm_cfg_lst.append(vm) vm_xml.set('managementIp', vm_ip) if xmlobject.has_element(vm, 'mnHostRef'): setup_mn_host_vm(vm_inv, vm) if xmlobject.has_element(vm, 'backupStorageRef'): volume_option = test_util.VolumeOption() volume_option.set_name(os.environ.get('volumeName')) for bs_ref in xmlobject.safe_list(vm.backupStorageRef): if bs_ref.type_ == 'ceph': disk_offering_uuid = bs_ref.offering_uuid_ volume_option.set_disk_offering_uuid(disk_offering_uuid) volume_inv = create_volume_from_offering(zstack_management_ip, volume_option) attach_volume(zstack_management_ip, volume_inv.uuid, vm_inv.uuid) break if bs_ref.type_ == 'fusionstor': disk_offering_uuid = bs_ref.offering_uuid_ volume_option.set_disk_offering_uuid(disk_offering_uuid) volume_inv = create_volume_from_offering(zstack_management_ip, volume_option) volume_inv1 = create_volume_from_offering(zstack_management_ip, volume_option) volume_inv2 = create_volume_from_offering(zstack_management_ip, volume_option) 
attach_volume(zstack_management_ip, volume_inv.uuid, vm_inv.uuid) attach_volume(zstack_management_ip, volume_inv1.uuid, vm_inv.uuid) attach_volume(zstack_management_ip, volume_inv2.uuid, vm_inv.uuid) break setup_backupstorage_vm(vm_inv, vm, deploy_config) if xmlobject.has_element(vm, 'primaryStorageRef'): setup_primarystorage_vm(vm_inv, vm, deploy_config) for ps_ref in xmlobject.safe_list(vm.primaryStorageRef): if ps_ref.type_ == 'ocfs2smp': if ocfs2smp_shareable_volume_is_created == False and hasattr(ps_ref, 'disk_offering_uuid_'): ocfs2smp_disk_offering_uuid = ps_ref.disk_offering_uuid_ volume_option.set_disk_offering_uuid(ocfs2smp_disk_offering_uuid) volume_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi']) share_volume_inv = create_volume_from_offering(zstack_management_ip, volume_option) ocfs2smp_shareable_volume_is_created = True attach_volume(zstack_management_ip, share_volume_inv.uuid, vm_inv.uuid) xml_string = etree.tostring(root_xml, 'utf-8') xml_string = minidom.parseString(xml_string).toprettyxml(indent=" ") open(scenario_file, 'w+').write(xml_string) setup_ceph_storages(scenario_config, scenario_file, deploy_config) setup_ocfs2smp_primary_storages(scenario_config, scenario_file, deploy_config, vm_inv_lst, vm_cfg_lst) setup_fusionstor_storages(scenario_config, scenario_file, deploy_config)
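# NOTE (editor): deploy_scenario() records every created VM into an in-memory XML tree and
# pretty-prints it into scenario_file so later suites can reload the deployed topology. A
# self-contained sketch of that build/pretty-print step (element names mirror the code above; the
# output path and attribute values are illustrative):
from xml.etree import ElementTree as etree
from xml.dom import minidom

example_root = etree.Element('deployerConfig')
example_vms = etree.SubElement(example_root, 'vms')
example_vm = etree.SubElement(example_vms, 'vm')
example_vm.set('name', 'example-vm')
example_vm.set('ip', '10.0.1.23')

example_xml = etree.tostring(example_root, 'utf-8')
example_pretty = minidom.parseString(example_xml).toprettyxml(indent='  ')
open('/tmp/example-scenario.xml', 'w').write(example_pretty)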
def _deploy_primary_storage(zone): if xmlobject.has_element(zone, 'primaryStorages.IscsiFileSystemBackendPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.IscsiFileSystemBackendPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddIscsiFileSystemBackendPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.ISCSI_FILE_SYSTEM_BACKEND_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid action.chapPassword = pr.chapPassword_ action.chapUsername = pr.chapUsername_ action.sshPassword = pr.sshPassword_ action.sshUsername = pr.sshUsername_ action.hostname = pr.hostname_ action.filesystemType = pr.filesystemType_ thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.localPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.localPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddLocalPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.LOCAL_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.cephPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.cephPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddCephPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.CEPH_PRIMARY_STORAGE_TYPE action.monUrls = pr.monUrls_.split(';') if pr.dataVolumePoolName__: action.dataVolumePoolName = pr.dataVolumePoolName__ if pr.rootVolumePoolName__: action.rootVolumePoolName = pr.rootVolumePoolName__ if pr.imageCachePoolName__: action.imageCachePoolName = pr.imageCachePoolName__ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddNfsPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.type = inventory.NFS_PRIMARY_STORAGE_TYPE action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'): if zone.duplication__ == None: duplication = 1 else: duplication = int(zone.duplication__) for pr in 
xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage): for zone_ref in range(duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): for pref in xmlobject.safe_list(cluster.primaryStorageRef): if pref.text_ == pr.name_: if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref) thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() if xmlobject.has_element(zone, 'primaryStorages.sharedMountPointPrimaryStorage'): zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name=zone.name_) zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage') for pr in xmlobject.safe_list(zone.primaryStorages.sharedMountPointPrimaryStorage): if ps_name and ps_name != pr.name_: continue action = api_actions.AddSharedMountPointPrimaryStorageAction() action.sessionUuid = session_uuid action.name = pr.name_ action.description = pr.description__ action.url = pr.url_ action.zoneUuid = zinv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start()
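# NOTE (editor): _deploy_primary_storage() repeats the same walk for each supported
# primaryStorages.* element; it is effectively a dispatch from XML element name to API action class.
# The dict below is only a reading aid summarizing the branches above, not a structure used by the
# code itself:
PS_ELEMENT_TO_ACTION = {
    'IscsiFileSystemBackendPrimaryStorage': 'AddIscsiFileSystemBackendPrimaryStorageAction',
    'localPrimaryStorage': 'AddLocalPrimaryStorageAction',
    'cephPrimaryStorage': 'AddCephPrimaryStorageAction',
    'nfsPrimaryStorage': 'AddNfsPrimaryStorageAction',
    'simulatorPrimaryStorage': '_generate_sim_ps_action (per zone/cluster duplication)',
    'sharedMountPointPrimaryStorage': 'AddSharedMountPointPrimaryStorageAction',
}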
def _set_and_validate_config(self): basic_config = self.config.basicConfig deploy_config = self.config.deployerConfig self.zstack_pkg = self._full_path(basic_config.zstackPkg.text_) self.zstack_install_script = \ self._full_path(basic_config.zstackInstallScript.text_) if not os.path.exists(self.zstack_pkg): raise ActionError('unable to find %s for ZStack binary' \ % self.zstack_pkg) if basic_config.hasattr('zstackInstallPath'): self.install_path = \ self._full_path(basic_config.zstackInstallPath.text_) else: raise ActionError(\ 'need to set config.deployerConfig.zstackInstallPath in : %s' % self.deploy_config_path) #set ZSTACK_HOME, which will be used by zstack-ctl os.environ['ZSTACK_HOME'] = '%s/apache-tomcat/webapps/zstack/' % \ self.install_path if basic_config.hasattr('testAgent'): self.test_agent_path = self._full_path( basic_config.testAgent.text_) linux.error_if_path_missing(self.test_agent_path) for zone in deploy_config.zones.get_child_node_as_list('zone'): for cluster in zone.clusters.get_child_node_as_list('cluster'): if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE: for h in cluster.hosts.get_child_node_as_list('host'): h.managementIp_ h.username_ h.password_ # will raise exception if one of the above is not specified in the xml file. self.test_agent_hosts.append(h) else: if xmlobject.has_element(basic_config, 'testAgentHost'): raise ActionError( '<testAgent> is missing while <testAgentHost> is present') self.catalina_home = self.install_path + '/apache-tomcat' self.wait_for_start_timeout = basic_config.get( 'managementServerStartTimeout') if not self.wait_for_start_timeout: self.wait_for_start_timeout = 120 else: self.wait_for_start_timeout = int(self.wait_for_start_timeout) if hasattr(basic_config, 'rabbitmq'): self.rabbitmq_server = basic_config.rabbitmq.get( 'server', 'localhost') self.rabbitmq_server_root_passwd = basic_config.rabbitmq.get( 'password', '') if not self.rabbitmq_server_root_passwd: print('!!!WARN! Rabbitmq server root password is not set!') else: raise ActionError( 'need to set config.basicConfig.rabbitmq.server in: %s' % self.deploy_config_path) if hasattr(basic_config, 'db'): self.need_deploy_db = True self.db_server = basic_config.db.get('server', 'localhost') self.db_username = basic_config.db.get('username', 'zstack') self.db_password = basic_config.db.get('password', '') self.db_admin_username = basic_config.db.get('admin', 'root') self.db_admin_password = basic_config.db.get('adminPassword', '') self.db_server_root_password = basic_config.db.get( 'server_root_password', '') if not self.db_server_root_password: print('!!!WARN! Database server root password is not set!')
self.db_port = basic_config.db.get('port', '3306') if basic_config.has_element('zstackProperties'): if basic_config.zstackProperties.text_: self.zstack_properties = self._full_path( basic_config.zstackProperties.text_) if not os.path.exists(self.zstack_properties): print( 'unable to find zstackProperties at %s, use \ default one' % self.zstack_properties) self.zstack_properties = None if basic_config.has_element('zstackHaVip'): self.zstack_ha_vip = basic_config.zstackHaVip.text_ else: self.zstack_ha_vip = None if basic_config.has_element('zstackManagementIp'): self.zstack_management_ip = basic_config.zstackManagementIp.text_ else: self.zstack_management_ip = None os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = '' if deploy_config.has_element('nodes') \ and deploy_config.nodes.has_element('node'): for node in deploy_config.nodes.get_child_node_as_list('node'): node.ip_ self.nodes.append(node) if linux.is_ip_existing(node.ip_): os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = node.ip_ elif not os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP'): os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = node.ip_ else: raise ActionError( 'deploy.xml setting error. No deployerConfig.nodes.node is found. ' ) if self.zstack_ha_vip != None: os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = self.zstack_ha_vip if self.zstack_management_ip != None: os.environ[ 'ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = self.zstack_management_ip if not os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP'): raise ActionError( 'deploy.xml setting error. No deployerConfig.nodes.node.ip is defined. ' )
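# NOTE (editor): the tail of _set_and_validate_config() selects ZSTACK_BUILT_IN_HTTP_SERVER_IP:
# a node IP that exists on the local machine always wins, otherwise the first node IP seen is kept,
# and an HA VIP or explicit management IP overrides either. A minimal sketch of that priority with a
# stubbed is_ip_existing (the real check is linux.is_ip_existing):
def pick_built_in_http_server_ip(node_ips, ha_vip=None, management_ip=None,
                                 is_ip_existing=lambda ip: False):
    chosen = ''
    for ip in node_ips:
        if is_ip_existing(ip):
            chosen = ip
        elif not chosen:
            chosen = ip
    if ha_vip is not None:
        chosen = ha_vip
    if management_ip is not None:
        chosen = management_ip
    return chosen

# e.g. pick_built_in_http_server_ip(['10.0.0.5', '10.0.0.6'], ha_vip='10.0.0.100')
# returns '10.0.0.100'.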
def test(): if test_lib.scenario_config == None or test_lib.scenario_file == None: test_util.test_fail('Suite Setup Fail without scenario') if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file): scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config) test_util.test_skip('Suite Setup Success') if test_lib.scenario_config != None and test_lib.scenario_destroy != None: scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy) nic_name = "eth0" if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file): nic_name = "zsn0" #This vlan creation is not a must if testing is under a nested virt env. But it is required on a physical host without enough physical network devices, and when your test execution machine is not the same one as the Host machine. #No matter whether the current host is a ZStack test host, we need to create 2 vlan devices for future testing connections for novlan test cases. linux.create_vlan_eth(nic_name, 10) linux.create_vlan_eth(nic_name, 11) #If the test execution machine is not the same one as the Host machine, the deploy work needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent), and it can not directly call SetupAction.run(). test_lib.setup_plan.deploy_test_agent() cmd = host_plugin.CreateVlanDeviceCmd() cmd.ethname = nic_name cmd.vlan = 10 cmd2 = host_plugin.CreateVlanDeviceCmd() cmd2.ethname = nic_name cmd2.vlan = 11 testHosts = test_lib.lib_get_all_hosts_from_plan() if type(testHosts) != type([]): testHosts = [testHosts] for host in testHosts: http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd) http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2) test_stub.deploy_2ha(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config) mn_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 0).ip_ mn_ip2 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 1).ip_ if not xmlobject.has_element(test_lib.deploy_config, 'backupStorages.miniBackupStorage'): host_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 2).ip_ test_stub.recover_vlan_in_host(host_ip1, test_lib.all_scenario_config, test_lib.deploy_config) test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT) test_util.test_logger("@@@DEBUG->suite_setup@@@ os\.environ\[\'ZSTACK_BUILT_IN_HTTP_SERVER_IP\'\]=%s; os\.environ\[\'zstackHaVip\'\]=%s" \ %(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']) ) ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip1, 'root', 'password') ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip2, 'root', 'password') if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT): os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip1)) os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip2)) deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file) for host in testHosts: os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_)) test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
#test_lib.lib_set_reserved_memory('1G') if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-local-ps.xml"], ["scenario-config-upgrade-3.1.1.xml"]): cmd = r"sed -i '$a\172.20.198.8 rsync.repo.zstack.io' /etc/hosts" ssh.execute(cmd, mn_ip1, "root", "password", False, 22) ssh.execute(cmd, mn_ip2, "root", "password", False, 22) test_util.test_pass('Suite Setup Success')
def add_cluster(deployConfig, session_uuid, cluster_name = None, \ zone_name = None): if not xmlobject.has_element(deployConfig, "zones.zone"): return def _add_cluster(action, zone_ref, cluster, cluster_ref): evt = action.run() test_util.test_logger(jsonobject.dumps(evt)) cinv = evt.inventory try: if xmlobject.has_element(cluster, 'primaryStorageRef'): for pref in xmlobject.safe_list(cluster.primaryStorageRef): ps_name = generate_dup_name(generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c') pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name) pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster') action_ps = api_actions.AttachPrimaryStorageToClusterAction() action_ps.sessionUuid = session_uuid action_ps.clusterUuid = cinv.uuid action_ps.primaryStorageUuid = pinv.uuid evt = action_ps.run() test_util.test_logger(jsonobject.dumps(evt)) except: exc_info.append(sys.exc_info()) if cluster.allL2NetworkRef__ == 'true': #find all L2 network in zone and attach to cluster cond = res_ops.gen_query_conditions('zoneUuid', '=', \ action.zoneUuid) l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK, \ cond, session_uuid) l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK, \ [{'name':'zoneUuid', 'op':'=', 'value':action.zoneUuid}], \ session_uuid, ['uuid'], 0, l2_count) else: l2invs = [] if xmlobject.has_element(cluster, 'l2NetworkRef'): for l2ref in xmlobject.safe_list(cluster.l2NetworkRef): l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c') cond = res_ops.gen_query_conditions('zoneUuid', '=', \ action.zoneUuid) cond = res_ops.gen_query_conditions('name', '=', l2_name, \ cond) l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK, \ cond, session_uuid, ['uuid']) if not l2inv: raise test_util.TestError("Can't find l2 network [%s] in database." 
% l2_name) l2invs.extend(l2inv) for l2inv in l2invs: action = api_actions.AttachL2NetworkToClusterAction() action.sessionUuid = session_uuid action.clusterUuid = cinv.uuid action.l2NetworkUuid = l2inv.uuid thread = threading.Thread(target=_thread_for_action, args=(action,)) wait_for_thread_queue() thread.start() def _deploy_cluster(zone): if not xmlobject.has_element(zone, "clusters.cluster"): return if zone.duplication__ == None: zone_duplication = 1 else: zone_duplication = int(zone.duplication__) for zone_ref in range(zone_duplication): for cluster in xmlobject.safe_list(zone.clusters.cluster): if cluster_name and cluster_name != cluster.name_: continue if cluster.duplication__ == None: cluster_duplication = 1 else: cluster_duplication = int(cluster.duplication__) for cluster_ref in range(cluster_duplication): action = api_actions.CreateClusterAction() action.sessionUuid = session_uuid action.name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c') action.description = generate_dup_name(generate_dup_name(cluster.description__, zone_ref, 'z'), cluster_ref, 'c') action.hypervisorType = cluster.hypervisorType_ zone_name = generate_dup_name(zone.name_, zone_ref, 'z') zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name) zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster') action.zoneUuid = zinv.uuid thread = threading.Thread(target=_add_cluster, args=(action, zone_ref, cluster, cluster_ref, )) wait_for_thread_queue() thread.start() for zone in xmlobject.safe_list(deployConfig.zones.zone): if zone_name and zone_name != zone.name_: continue _deploy_cluster(zone) wait_for_thread_done()
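# NOTE (editor): zone/cluster duplication relies on generate_dup_name() to derive unique names per
# (zone_ref, cluster_ref) pair. A plausible sketch of its behavior, assuming a ref of 0 leaves the
# name untouched; the exact suffix format below is an assumption for illustration, not taken from
# the project source:
def generate_dup_name_sketch(name, ref, tag):
    if not name or ref == 0:
        return name
    return '%s-%s%s' % (name, tag, ref)

# chained the way _deploy_cluster() does:
# generate_dup_name_sketch(generate_dup_name_sketch('cluster1', 1, 'z'), 2, 'c')
# -> 'cluster1-z1-c2'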