def test():
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    image_name = os.environ.get('imageName_i_c7')
    vm = test_stub.create_vlan_vm(image_name)
    test_obj_dict.add_vm(vm)
    if os.environ.get('zstackManagementIp') == None:
        vm.check()
    else:
        time.sleep(60)

    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    target_file = '/root/zstack-all-in-one.tgz'
    test_stub.prepare_test_env(vm_inv, target_file)
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip

    test_stub.copy_id_dsa(vm_inv, ssh_cmd, tmp_file)
    test_stub.copy_id_dsa_pub(vm_inv)
    test_stub.execute_all_install(ssh_cmd, target_file, tmp_file)
    test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
    test_stub.upgrade_zstack(ssh_cmd, target_file, tmp_file)
    test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
    os.system('rm -f %s' % tmp_file)

    vm.destroy()
    test_util.test_pass('ZStack upgrade Test Success')
def test():
    os.environ['ZSTACK_THREAD_THRESHOLD'] = '1000'
    os.environ['ZSTACK_TEST_NUM'] = '1000'

    test_lib.lib_set_provision_memory_rate(20)
    test_lib.lib_set_provision_storage_rate(20)
    test_lib.lib_set_provision_cpu_rate(20)

    Create()
    time.sleep(180)

    create_vm_begin_time = get_begin_time()
    create_vm_end_time = get_end_time()
    print("begin time = %s" % create_vm_begin_time)
    print("end time = %s" % create_vm_end_time)
    if create_vm_end_time != 0 and create_vm_begin_time != 0:
        create_1000_vm_time = create_vm_end_time - create_vm_begin_time
        test_util.test_dsc("create_vm_time is " + str(create_1000_vm_time))

    Destroy_VM()
    time.sleep(180)
    Expunge_VM()
    time.sleep(180)

    zone_name = os.environ.get('zoneName')
    zone = res_ops.get_resource(res_ops.ZONE, name = zone_name)[0]
    zone_ops.delete_zone(zone.uuid)

    test_util.test_pass('Create 1000 vms success, takes %s time' % create_1000_vm_time)
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid

    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS is changed to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.suspend()
    vm.check()

    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def test():
    global ipsec
    global vip1_uuid
    global vpc_vr

    cond = res_ops.gen_query_conditions('name', '=', 'public network')
    public_network = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vip1 = test_stub.create_vip('vip_ipsec', public_network.uuid)
    vip1_uuid = vip1.get_vip().uuid

    test_util.test_dsc('Create vpc vr and attach networks')
    vpc_vr = test_stub.create_vpc_vrouter()
    cond = res_ops.gen_query_conditions('name', '=', 'l3VlanNetwork11')
    l3_vlan_network11 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vpc_vr.add_nic(l3_vlan_network11.uuid)

    peer_address = '10.94.10.10'
    try:
        ipsec = ipsec_ops.create_ipsec_connection('ipsec', None, peer_address, '123456', vip1_uuid, None)
    except:
        test_util.test_fail('Failed to create vpc ipsec')

    test_stub.delete_vip(vip1_uuid)
    vpc_vr.destroy()
    ipsec_ops.delete_ipsec_connection(ipsec.uuid)
    test_util.test_pass('Create VPC Ipsec Success')
def test():
    test_util.test_dsc('''
        Will do random test operations, including vm create/stop/start/reboot
        /destroy, volume create/attach/detach/delete. It doesn't include SG, VIP
        and snapshot operations. If the max of 4 coexisting running vms is
        reached, testing will succeed and quit.
    ''')
    target_running_vm = 4
    test_util.test_dsc('Random Test Begin. Test target: 4 coexisting running VM (not include VR).')

    robot_test_obj = test_util.Robot_Test_Object()
    robot_test_obj.set_test_dict(test_dict)
    robot_test_obj.set_exclusive_actions_list(\
            test_state.TestAction.sg_actions \
            + test_state.TestAction.vip_actions \
            + test_state.TestAction.snapshot_actions)
    priority_actions = test_state.TestAction.volume_actions * 4
    priority_action_obj = action_select.ActionPriority()
    priority_action_obj.add_priority_action_list(priority_actions)
    robot_test_obj.set_priority_actions(priority_action_obj)

    rounds = 1
    while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm:
        print "test_dict: %s" % test_dict
        test_util.test_dsc('New round %s starts: random operation pickup.' % rounds)
        test_lib.lib_vm_random_operation(robot_test_obj)
        test_util.test_dsc('Round %s finished. Begin status checking.' % rounds)
        rounds += 1
        test_lib.lib_robot_status_check(test_dict)

    test_util.test_dsc('Reach test pass exit criteria.')
    test_lib.lib_robot_cleanup(test_dict)
    test_util.test_pass('Create random VM Test Success')
def test():
    test_util.test_dsc('''
        Test Description:
            Will create 1 VM with 3 l3 networks. 1 l3_network is not using VR;
            1 l3_network is using novlan VR; 1 l3_network is using vlan VR.
        Resource required:
            Need to support 3 VMs (1 test VM + 2 VR VMs) existing at the same time.
            This test requires a special image, which is configured with at least
            3 enabled NICs (e.g. eth0, eth1, eth2).
    ''')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list = [l3_net_uuid]

    l3_name = os.environ.get('l3VlanNetworkName3')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list.append(l3_net_uuid)

    l3_name = os.environ.get('l3VlanNetworkName4')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list.append(l3_net_uuid)

    vm = test_stub.create_vm(l3_net_list, image_uuid, '3_l3_vm')
    test_obj_dict.add_vm(vm)
    vm.check()

    if len(vm.vm.vmNics) == 3:
        test_util.test_logger("Find 3 expected Nics in new created VM.")
    else:
        test_util.test_fail("Newly created VM does not have 3 Nics. It only has %s" % len(vm.get_vm().vmNics))

    vm.destroy()
    test_util.test_pass('Create 1 VM with 3 l3_network (1 vlan VR, 1 novlan VR and 1 no VR L3network) successfully.')
def test():
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return

    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit = 1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit = 1)
    if not ps:
        test_util.test_skip('No Enabled/Connected primary storage was found, skip test.')
        return True

    host = host[0]
    ps = ps[0]
    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name = 'basic-test-vm', host_uuid = host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap2 = host_res2.availableCapacity

    if avail_cap != avail_cap2:
        test_util.test_fail('PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s' % (host.uuid, avail_cap, avail_cap2))

    test_util.test_pass('Expunge VM Test Success')
def test(): global vm test_util.test_dsc('create VM with setting password') for root_password in root_password_list: test_util.test_dsc("root_password: \"%s\"" %(root_password)) #vm = test_stub.create_vm(vm_name = 'c7-vm-no-sys-tag', image_name = "imageName_i_c7_no_tag", root_password=root_password) vm = test_stub.create_vm(vm_name = 'c7-vm-no-sys-tag', image_name = "imageName_i_c7_no_tag") backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm) for bs in backup_storage_list: if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE: break if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE: break if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE: break else: vm.destroy() test_util.test_skip('Not find image store type backup storage.') #if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password): # test_util.test_fail("create vm with root password: %s failed", root_password) # stop vm && change vm password #vm.stop() vm.check() try: vm_ops.change_vm_password(vm.get_vm().uuid, "root", root_password) except Exception, e: if "CreateSystemTag" in str(e): test_util.test_pass("negative test of change a no system tag image passed.") else: test_util.test_fail("negative test failed with not expected log: %s", str(e))
def test():
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    image_name = os.environ.get('imageName_i_c7_z_1.9')
    #iso_path = os.environ.get('iso_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    #upgrade_script_path = os.environ.get('upgradeScript')

    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)
    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    test_util.test_logger('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    test_util.test_logger('Upgrade zstack to latest with repo')
    #test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.update_repo(vm_ip, tmp_file)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_mn_running(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)
    os.system('rm -f %s' % tmp_file)

    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
def test(): global test_obj_dict #volume_creation_option = test_util.VolumeOption() #test_util.test_dsc('Create volume and check') #disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) #volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) #volume = test_stub.create_volume(volume_creation_option) bs_cond = res_ops.gen_query_conditions("status", '=', "Connected") bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \ None, fields=['uuid']) if not bss: test_util.test_skip("not find available backup storage. Skip test") volume_creation_option = test_util.VolumeOption() test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume1 = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume1) volume1.check() volume_uuid = volume1.volume.uuid test_util.test_dsc('Create vm and check') vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2') test_obj_dict.add_vm(vm) vm.check() vm_uuid = vm.vm.uuid volume1.attach(vm) volume1.detach(vm_uuid) vm.stop() image_obj = volume1.create_template([bss[0].uuid]) vm.start() host_uuid = vm.vm.hostUuid ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid) volume2 = image_obj.create_data_volume(ps.uuid, 'volumeName', host_uuid) test_obj_dict.add_volume(volume2) volume2.check() volume_uuid = volume2.volume.uuid ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid) if ps.type != inventory.LOCAL_STORAGE_TYPE: test_util.test_skip('Skip test on non-localstorage') snapshots = test_obj_dict.get_volume_snapshot(volume_uuid) snapshots.set_utility_vm(vm) snapshots.create_snapshot('create_snapshot1') snapshots.check() snapshots.create_snapshot('create_snapshot2') snapshots.check() target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid) target_host_uuid = target_host.uuid vol_ops.migrate_volume(volume_uuid, target_host_uuid) test_lib.lib_error_cleanup(test_obj_dict) test_util.test_pass('Cold migrate Data Volume from Template with Snapshot Test Success')
def test():
    test_util.test_dsc('Test Change VM Image In Multihosts Env')
    global vm
    image = test_lib.lib_get_image_by_name("centos")
    vm = test_stub.create_vm(image_uuid=image.uuid)
    last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    last_primarystorage_uuid = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    last_host_uuid = test_lib.lib_get_vm_last_host(vm.get_vm()).uuid

    image_uuid = test_lib.lib_get_image_by_name("image_for_sg_test").uuid
    vm_uuid = vm.get_vm().uuid
    host_ops.change_host_state(host_uuid = last_host_uuid, state = 'disable')
    vm_ops.stop_vm(vm_uuid)
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())

    # Disable the vm's host. If ps is shared storage, the vm will be started on another
    # host that meets the conditions and the operation of changing the vm image will succeed.
    if ps.type != 'LocalStorage':
        vm_ops.change_vm_image(vm_uuid, image_uuid)
        vm_ops.start_vm(vm_uuid)

        # check whether the network config has changed
        l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
        if l3network_uuid_after != last_l3network_uuid:
            test_util.test_fail('Change VM Image Failed. The network config has changed.')

        # check whether the primary storage has changed
        primarystorage_uuid_after = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
        if primarystorage_uuid_after != last_primarystorage_uuid:
            test_util.test_fail('Change VM Image Failed. Primary storage has changed.')

        vm.destroy()
        test_util.test_pass('Change Vm Image Test In Multihosts Env Success')

    # Disable the vm's host. If ps is local storage, the operation of changing the vm image will fail.
    else:
        try:
            vm_ops.change_vm_image(vm_uuid, image_uuid)
        except:
            test_util.test_pass('Change Vm Image Test In Multihosts Env Success')
        test_util.test_fail('Test Change VM Image In Multihosts Env Failed')
def test(): global vm test_util.test_dsc('create VM with setting password') for root_password in root_password_list: test_util.test_dsc("root_password: \"%s\"" %(root_password)) vm = test_stub.create_vm(vm_name = 'u13-vm', image_name = "imageName_i_u13", root_password=root_password) backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm) for bs in backup_storage_list: if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE: break if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE: break if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE: break else: vm.destroy() test_util.test_skip('Not find image store type backup storage.') if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password): test_util.test_fail("create vm with root password: %s failed", root_password) vm.destroy() vm.check() vm.expunge() vm.check() test_util.test_pass('Set password when VM is creating is successful.')
def test(): test_util.test_dsc('Create test vm and check') vm1 = test_stub.create_vm(vm_name="vm1", image_name="ocfs2-host-image") test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vm(vm_name="vm2", image_name="ocfs2-host-image") test_obj_dict.add_vm(vm2) vm1.check() vm2.check() test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) normal_volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(normal_volume) normal_volume.check() volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi']) sharable_volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(sharable_volume) sharable_volume.check() normal_volume.check() test_util.test_dsc('Attach volume and check') #mv vm checker later, to save some time. normal_volume.attach(vm2) sharable_volume.attach(vm1) sharable_volume.attach(vm2) sharable_volume.check() normal_volume.check() config_ocfs2_vms(vm1, vm2) check_sharable_volume(vm1, vm2) test_util.test_dsc('Detach volume and check') sharable_volume.detach(vm1.get_vm().uuid) sharable_volume.detach(vm2.get_vm().uuid) normal_volume.detach(vm2.get_vm().uuid) sharable_volume.check() normal_volume.check() test_util.test_dsc('Delete volume and check') sharable_volume.delete() sharable_volume.expunge() normal_volume.delete() normal_volume.expunge() sharable_volume.check() normal_volume.check() test_obj_dict.rm_volume(sharable_volume) test_obj_dict.rm_volume(normal_volume) vm1.destroy() vm2.destroy() vm1.check() vm2.check() vm1.expunge() vm2.expunge() test_util.test_pass('Create Data Volume for VM Test Success')
def input(self, label, content): css_selector = 'label[for="%s"]' % label selection_rendered = 'ant-select-selection__rendered' def select_opt(elem, opt_value): elem.get_element(selection_rendered).click() for opt in self.get_elements('li[role="option"]'): if opt.displayed() and opt_value in opt.text: opt.click() def input_content(elem, content): element = elem.get_element('input', 'tag name') element.input(content) title = None for elem in self.get_elements('ant-row ant-form-item'): title_elem = elem.get_elements(css_selector) if title_elem: title = title_elem[0].text.encode('utf-8') break if isinstance(content, types.ListType): input_content(elem, content[0]) select_opt(elem, content[1]) else: if elem.get_elements(selection_rendered): select_opt(elem, content) else: test_util.test_dsc('input [%s] for [%s]' % (content, title)) input_content(elem, content)
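# A minimal usage sketch for the helper above; the page object and field labels are
# hypothetical examples, not taken from the suite. A plain string is typed into the
# matched form item (or used to pick an option if that item renders an ant-design
# select), while a [text, option] pair first fills the text input and then picks the
# given option from the attached select dropdown.
#
#   page.input('name', 'test-vm-1')                 # plain text field
#   page.input('instanceOffering', ['2', '2C4G'])   # input text, then select option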
def test():
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)

    pri_l3_name = os.environ.get('l3VlanNetworkName1')
    pri_l3_uuid = test_lib.lib_get_l3_by_name(pri_l3_name).uuid
    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid

    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid, vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)

    vm.check()
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping the vip, but it failed')

    vm.destroy()
    test_obj_dict.rm_vm(vm)
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('did not expect to be able to ping the vip, but it succeeded')

    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')
def test():
    test_util.test_dsc('Test VM online change instance offering')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)

    cpuNum = 2
    memorySize = 666 * 1024 * 1024
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,\
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering)
    new_offering_uuid = new_offering.uuid
    vm.change_instance_offering(new_offering_uuid)
    vm.check()
    vm.reboot()
    vm.check()

    cpuNum = 1
    memorySize = 555 * 1024 * 1024
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,\
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering)
    new_offering_uuid = new_offering.uuid
    vm.change_instance_offering(new_offering_uuid)
    vm.check()

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM online change instance offering Test Pass')
def test():
    test_util.test_dsc('Test update instance offering')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit = 1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    host_uuid = host[0].uuid

    new_offering = test_lib.lib_create_instance_offering(cpuNum = 1, \
            cpuSpeed = 16, memorySize = 536870912, name = 'orgin_instance_name')
    test_obj_dict.add_instance_offering(new_offering)
    vm = test_stub.create_vm(vm_name = 'test_update_instance_offering', \
            host_uuid = host_uuid, \
            instance_offering_uuid = new_offering.uuid)
    test_obj_dict.add_vm(vm)
    vm.stop()

    # These parameters need to be populated.
    updated_offering = test_lib.lib_update_instance_offering(new_offering.uuid, cpuNum = 2, cpuSpeed = 16, \
            memorySize = 1073741824, name = 'updated_instance_name', \
            volume_iops = None, volume_bandwidth = None, \
            net_outbound_bandwidth = None, net_inbound_bandwidth = None)

    vm.start()
    vm.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test updated instance offering Pass')
def check_detach_l2(pre_cluster_uuid, l2_uuid, vm, is_other_cluster):
    l2 = res_ops.get_resource(res_ops.L2_NETWORK, uuid = l2_uuid)[0]
    attached_clusters = l2.attachedClusterUuids
    if pre_cluster_uuid in attached_clusters:
        test_util.test_fail('[cluster:] %s is still in [l2:] %s attached list.' \
                % (pre_cluster_uuid, l2_uuid))

    test_util.test_dsc('start vm again. vm should be started in a different cluster, if there is one.')
    if attached_clusters:
        if not is_other_cluster:
            test_util.test_fail('There should not be an available cluster for [l2:] %s. But catch some.' % l2_uuid)
        vm.start()
        new_cluster_uuid = vm.get_vm().clusterUuid
        if new_cluster_uuid == pre_cluster_uuid:
            test_util.test_fail('[vm:] %s starts on old [cluster:] %s, which is detached from [l2:] %s' \
                    % (vm.get_vm().uuid, new_cluster_uuid, l2_uuid))
        vm.check()
    else:
        if is_other_cluster:
            test_util.test_fail('There should be an available cluster for [l2:] %s. But did not catch.' % l2_uuid)
        # no cluster is attached with l2. vm will fail to start.
        try:
            vm.start()
        except:
            test_util.test_logger('Expected: VM start failed, since no cluster is attached to [l2:] %s, \
after [cluster:] %s is detached' % (l2_uuid, pre_cluster_uuid))
        else:
            test_util.test_fail('[vm:] %s is wrongly started up, since no cluster is \
attached with [l2:] %s, after previous detaching ops' % \
                    (vm.get_vm().uuid, l2_uuid))
def test():
    global volume_offering_uuid
    test_util.test_dsc('Test VM data volume bandwidth QoS by 20MB')
    # unit is KB
    write_bandwidth = 5*1024*1024
    new_volume_offering = test_lib.lib_create_disk_offering(write_bandwidth = write_bandwidth)
    volume_offering_uuid = new_volume_offering.uuid

    vm = test_stub.create_vm(vm_name='vm_volume_qos', disk_offering_uuids = [volume_offering_uuid])
    vm.check()
    test_obj_dict.add_vm(vm)
    vm_inv = vm.get_vm()

    cond = res_ops.gen_query_conditions("vmInstanceUuid", '=', vm_inv.uuid)
    cond = res_ops.gen_query_conditions("type", '=', 'Data', cond)
    volume_uuid = res_ops.query_resource(res_ops.VOLUME, cond)[0].uuid
    test_lib.lib_mkfs_for_volume(volume_uuid, vm_inv)

    path = '/mnt'
    user_name = 'root'
    user_password = '******'
    os.system("sshpass -p '%s' ssh %s@%s 'mount /dev/vdb1 %s'" % (user_password, user_name, vm_inv.vmNics[0].ip, path))
    vm.check()
    test_stub.make_ssh_no_password(vm_inv)
    test_stub.install_fio(vm_inv)

    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthRead != -1 and \
            vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthWrite != write_bandwidth:
        test_util.test_fail('Retrieved disk qos not match')

    test_stub.test_fio_bandwidth(vm_inv, write_bandwidth, path)
    if test_stub.test_fio_bandwidth(vm_inv, write_bandwidth/2, '/dev/vdb', raise_exception=False):
        test_util.test_fail('disk read qos is not expected to have limit as only write qos was set')

    vol_ops.delete_disk_offering(volume_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM data volume write QoS Test Pass')
def test():
    test_util.test_dsc('Test VM online change instance offering')
    cpuNum = 1
    memorySize = 555 * 1024 * 1024
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,\
            memorySize = memorySize)
    vm = test_stub.create_vm(vm_name = 'ckvmoffering-c7-64', image_name = "imageName_i_c7", instance_offering_uuid=new_offering.uuid)
    vm.check()
    test_obj_dict.add_vm(vm)
    test_obj_dict.add_instance_offering(new_offering)

    cpuNum = 1
    memorySize = 667 * 1024 * 1024
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,\
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering)
    new_offering_uuid = new_offering.uuid
    vm.change_instance_offering(new_offering_uuid)
    vm.check()

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM online change instance offering Test Pass')
def test(): test_util.test_dsc("create vr vm and vpc vrouter") vm = test_stub.create_vr_vm('vr_vm','imageName_s','l3NoVlanNetworkName2') test_obj_dict.add_vm(vm) if not test_lib.lib_find_vr_by_vm(vm.vm) or not test_lib.lib_find_vr_by_vm(vm.vm)[0]: test_lib.lib_error_cleanup(test_obj_dict) test_util.test_skip("skip the test for no vr found in the env.") vpc_vr = test_stub.create_vpc_vrouter() check_host_prometheus_conf() check_vrouter_prometheus_conf() check_prometheus_data() hosts = test_lib.lib_get_all_hosts_from_plan() for host in hosts: host_ops.reconnect_host(host_uuid) check_host_prometheus_conf() check_vrouter_prometheus_conf() check_prometheus_data() test_lib.lib_execute_ssh_cmd(mn_ip,"root","password","zstack-ctl restart_node",timeout=300) check_host_prometheus_conf() check_vrouter_prometheus_conf() check_prometheus_data() test_lib.lib_error_cleanup(test_obj_dict) test_util.test_pass('Test prometheus Success')
def test(): test_util.test_logger("start dhcp test for l3 public network") test_util.test_dsc("get no vlan network uuid") private_network = test_stub_dhcp.Private_IP_For_Dhcp() private_network.set_l2_query_resource(l2_query_resource) private_network.set_l2_type(type_l2[1]) l2_no_vlan_uuid = private_network.get_l2uuid() test_util.test_logger("antony @@@debug : %s" % (l2_no_vlan_uuid)) test_util.test_logger("create l3 network") private_network.set_ipVersion(ip_Version[0]) private_network.create_l3uuid(l3_name) test_util.test_logger( "antony @@@debug : %s" % (private_network.get_l3uuid())) private_network.add_service_to_l3network() test_util.test_logger("add ip v4 range to l3 network") private_network.add_ip_by_networkcidr( ip_range_name, networkcidr, dhcp_system_tags) if private_network.check_dhcp_ipaddress().find(dhcp_ip_for_private) == -1: test_util.test_fail("dhcp server ip create fail") test_util.test_logger("delete l3 network") private_network.del_l3uuid() test_util.test_pass("dhcp server ip create successfully")
def test(): pf_vm1 = test_stub.create_dnat_vm() test_obj_dict.add_vm(pf_vm1) l3_name = os.environ.get('l3VlanNetworkName1') vr1 = test_stub.create_vr_vm(test_obj_dict, l3_name) vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1) pf_vm1.check() vm_nic1 = pf_vm1.vm.vmNics[0] vm_nic_uuid1 = vm_nic1.uuid pri_l3_uuid = vm_nic1.l3NetworkUuid vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0] vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr) l3_uuid = vr_pub_nic.l3NetworkUuid vip = test_stub.create_vip('pf_attach_test', l3_uuid) test_obj_dict.add_vip(vip) vip_uuid = vip.get_vip().uuid test_util.test_dsc("attach, detach and delete pf for many times") for i in range(1, 451): test_util.test_logger('round %s' % (i)) starttime = datetime.datetime.now() pf_creation_opt1 = PfRule.generate_pf_rule_option(vr1_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule5_ports, private_target_rule=Port.rule5_ports, vip_uuid=vip_uuid) pf_creation_opt1.set_vip_ports(i, i) pf_creation_opt1.set_private_ports(i, i) test_pf1 = zstack_pf_header.ZstackTestPortForwarding() test_pf1.set_creation_option(pf_creation_opt1) test_pf1.create() vip.attach_pf(test_pf1) if i < 151: test_pf1.attach(vm_nic_uuid1, pf_vm1) pf_dict[i] = test_pf1.get_port_forwarding().uuid elif i < 301: test_pf1.attach(vm_nic_uuid1, pf_vm1) test_pf1.detach() pf_dict[i] = test_pf1.get_port_forwarding().uuid else : test_pf1.attach(vm_nic_uuid1, pf_vm1) test_pf1.detach() test_pf1.delete() endtime = datetime.datetime.now() optime = (endtime - starttime).seconds test_util.test_dsc("round %s, pf operation time: %s" % (i, optime)) test_util.test_logger("the pf operation time is %s seconds" % optime) if optime > 240: test_util.test_fail("the pf operation time is %s seconds, more than 240 seconds" % optime) vip.delete() test_obj_dict.rm_vip(vip) pf_vm1.destroy() test_obj_dict.rm_vm(pf_vm1) for j in pf_dict: net_ops.delete_port_forwarding(pf_dict[j]) test_util.test_pass("Test Port Forwarding Attach/Detach Successfully")
def test():
    global test_obj_dict
    test_util.test_dsc('Create a VM with 22 additional data volumes attached in virtio-scsi mode')
    disk_offering1 = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    disk_offering_uuids = []
    for i in range(0, 8):
        disk_offering_uuids.append(disk_offering1.uuid)
    disk_offering2 = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    for i in range(0, 14):
        disk_offering_uuids.append(disk_offering2.uuid)

    vm = test_stub.create_vlan_vm(system_tags=["virtio::diskOffering::%s::num::14" % (disk_offering2.uuid),
            "virtio::diskOffering::%s::num::14" % (disk_offering1.uuid)],
            l3_name=os.environ.get('l3VlanNetworkName1'),
            disk_offering_uuids=disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()

    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 23:
        test_util.test_fail('Did not find 23 volumes for [vm:] %s. But we assigned 22 data volumes when creating the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 23 volumes for [vm:] %s.' % vm.vm.uuid)

    scsi_cmd = 'ls /dev/sd* | wc -l'
    if test_lib.lib_execute_command_in_vm(vm.get_vm(), scsi_cmd).strip() != '22':
        test_util.test_fail('Only expect 22 disks in virtio scsi mode')

    vm.destroy()
    test_util.test_pass('Create a VM with 22 additional data volumes with 22 of them using virtio-scsi PASS')
def test():
    global vms, ts
    test_util.test_dsc('create VM with setting password')
    for root_password in root_password_list:
        test_util.test_dsc("root_password: \"%s\"" % (root_password))
        for i in range(vm_num):
            vm_name = "VM%s" % (str(i))
            t = threading.Thread(target=create_vm_wrapper, args=('c7-'+vm_name, "batch_test_image", root_password))
            ts.append(t)
            t.start()
        for t in ts:
            t.join()
        for vm in vms:
            if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password):
                test_util.test_fail("create vm with root password: %s failed" % root_password)
            vm.destroy()
            vm.check()
            vm.expunge()
            vm.check()

    test_util.test_pass('Set password when VM is creating is successful.')
def test(): global test_obj_dict test_util.test_dsc("Create 1 VMs with vlan VR L3 network for SG testing.") vm1 = test_stub.create_sg_vm() test_obj_dict.add_vm(vm1) vm1.check() nic_uuid = vm1.vm.vmNics[0].uuid vm_nics = (nic_uuid, vm1) l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0] vm1_ip = test_lib.lib_get_vm_nic_by_l3(vr_vm, l3_uuid).ip target_ip_prefix = '10.10.10.' test_util.test_dsc("Create security groups.") for i in range(sg_num): target_ip = '%s%s' % (target_ip_prefix, str(1+i)) rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, target_ip) rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, target_ip) rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, target_ip) rule4 = test_lib.lib_gen_sg_rule(Port.rule4_ports, inventory.TCP, inventory.INGRESS, target_ip) rule5 = test_lib.lib_gen_sg_rule(Port.rule5_ports, inventory.TCP, inventory.INGRESS, target_ip) sg = test_stub.create_sg() test_obj_dict.add_sg(sg.security_group.uuid) sg.add_rule([rule1, rule2, rule3, rule4, rule5]) sg_vm.attach(sg, [vm_nics]) time.sleep(3) #need regularlly clean up log files in virtual router when doing stress test test_lib.lib_check_cleanup_vr_logs_by_vm(vm1.vm) #clean up all vm and sg test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('Create/Destroy VM with VR successfully')
def env_recover(): test_util.test_dsc("Destroy test object") test_lib.lib_error_cleanup(test_obj_dict) if new_ps_list: for new_ps in new_ps_list: ps_ops.detach_primary_storage(new_ps.uuid, new_ps.attachedClusterUuids[0]) ps_ops.delete_primary_storage(new_ps.uuid)
def test():
    test_util.test_dsc('Create test vm with static ip address and check. VR has DNS SNAT EIP PF and DHCP services')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    ip_address = net_ops.get_free_ip(l3_uuid)[0].ip
    static_ip_system_tag = test_lib.lib_create_vm_static_ip_tag(l3_uuid, \
            ip_address)
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'), system_tags=[static_ip_system_tag])
    test_obj_dict.add_vm(vm)
    vm.stop()

    cond = res_ops.gen_query_conditions('tag', '=', static_ip_system_tag)
    system_tag = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)[0]

    ip_address2 = net_ops.get_free_ip(l3_uuid)[0].ip
    static_ip_system_tag2 = test_lib.lib_create_vm_static_ip_tag(l3_uuid, \
            ip_address2)
    vm_ops.change_vm_static_ip(vm.get_vm().uuid, l3_uuid, ip_address2)
    vm.start()
    if ip_address2 != vm.get_vm().vmNics[0].ip:
        test_util.test_fail('VM static ip test failed')

    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM with static IP and change static IP Test Success')
def test():
    global new_offering_uuid
    test_util.test_dsc('Test VM network bandwidth QoS by 1MB')
    # unit is KB
    net_bandwidth1 = 1024
    new_offering1 = test_lib.lib_create_instance_offering(net_bandwidth = net_bandwidth1)
    test_obj_dict.add_instance_offering(new_offering1)
    new_offering_uuid = new_offering1.uuid
    vm = test_stub.create_vm(vm_name = 'vm_net_qos', \
            instance_offering_uuid = new_offering1.uuid)
    test_obj_dict.add_vm(vm)
    vm.stop()

    net_bandwidth2 = 512
    new_offering2 = test_lib.lib_create_instance_offering(net_bandwidth = net_bandwidth2)
    test_obj_dict.add_instance_offering(new_offering2)
    new_offering_uuid = new_offering2.uuid
    vm_inv = vm.get_vm()
    vm.change_instance_offering(new_offering_uuid)
    vm.start()
    vm.check()

    import time
    time.sleep(1)
    test_stub.make_ssh_no_password(vm_inv)
    test_stub.create_test_file(vm_inv, net_bandwidth2)
    test_stub.test_scp_speed(vm_inv, net_bandwidth2)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM Network QoS change instance offering Test Pass')
def test():
    global default_l3_mtu
    test_util.test_dsc('Create test vm and check. VR has DNS SNAT EIP PF and DHCP services')
    l3_name = os.environ.get('l3PublicNetworkName')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    default_l3_mtu = net_ops.get_l3_mtu(l3_net_uuid)

    vm = test_stub.create_vlan_vm(l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()

    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu %s"' % (default_l3_mtu))
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)

    net_ops.set_l3_mtu(l3_net_uuid, 1200)
    vm.reboot()
    vm.check()

    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu 1200"')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)

    vm.destroy()
    test_util.test_pass('Create VirtualRouter VM DNS DHCP SNAT EIP PF Test Success')
def test(): vm = test_stub.create_vm(vm_name='basic-test-vm', image_name='image_for_sg_test') test_obj_dict.add_vm(vm) vm1 = test_stub.create_vm(vm_name='basic-test-vm1', image_name='image_for_sg_test') test_obj_dict.add_vm(vm1) image_creation_option = test_util.ImageOption() backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm1.vm) for bs in backup_storage_list: if bs.type in [ inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE ]: image_creation_option.set_backup_storage_uuid_list( [backup_storage_list[0].uuid]) break else: vm.destroy() vm1.destroy() test_util.test_skip( 'Not find image store or ceph type backup storage.') #vm1.check() vm_root_volume_inv = test_lib.lib_get_root_volume(vm1.get_vm()) root_image_uuid = vm_root_volume_inv.rootImageUuid vm_img_inv = test_lib.lib_get_image_by_uuid(root_image_uuid) test_util.test_dsc('create snapshot and check') snapshots = test_obj_dict.get_volume_snapshot(vm_root_volume_inv.uuid) snapshots.set_utility_vm(vm) snapshots.create_snapshot('create_root_snapshot1') snapshot1 = snapshots.get_current_snapshot() snapshots.create_snapshot('create_root_snapshot2') image_creation_option.set_root_volume_uuid(vm_root_volume_inv.uuid) image_creation_option.set_name('test_create_vm_images_vm1') #image_creation_option.set_platform('Linux') # bs_type = backup_storage_list[0].type # if bs_type == 'Ceph': # origin_interval = conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', '1') image1 = test_image.ZstackTestImage() image1.set_creation_option(image_creation_option) image1.create() test_obj_dict.add_image(image1) image1.check() vm2 = test_stub.create_vm(image_name='test_create_vm_images_vm1') test_obj_dict.add_vm(vm2) #do vm check before snapshot check vm.check() vm1.stop() snapshots.check() snapshots.use_snapshot(snapshot1) vm1.start() snapshots.create_snapshot('create_root_snapshot1.1') image_creation_option.set_name('test_create_vm_images_vm2') image2 = test_image.ZstackTestImage() image2.set_creation_option(image_creation_option) image2.create() test_obj_dict.add_image(image2) image2.check() vm3 = test_stub.create_vm(image_name='test_create_vm_images_vm2') test_obj_dict.add_vm(vm3) snapshots.check() vm2.check() vm3.check() test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('Create VM Image in Image Store Success')
def test(): test_util.test_skip('Time cases need further polish, skip test right now') vm_name = 'vm_' + key_gen(7) begin_time = int(time.time() * 1000) vm = test_stub.create_vm(vm_name) test_obj_dict.add_vm(vm) ps = test_lib.lib_get_primary_storage_by_uuid( vm.get_vm().allVolumes[0].primaryStorageUuid) if ps.type != inventory.LOCAL_STORAGE_TYPE: test_util.test_skip('Skip test on non-localstorage') vm.check() [select_bs_time, allocate_host_time, allocate_ps_time, local_storage_allocate_capacity_time,\ allocate_volume_time, allocate_nic_time, instantiate_res_pre_time, create_on_hypervisor_time,\ instantiate_res_post_time] = test_stub.get_stage_time(vm_name, begin_time) test_util.test_dsc("select_bs_time is " + str(select_bs_time)) test_util.test_dsc("allocate_host_time is " + str(allocate_host_time)) test_util.test_dsc("allocate_ps_time is " + str(allocate_ps_time)) test_util.test_dsc("local_storage_allocate_capacity_time is " + str(local_storage_allocate_capacity_time)) test_util.test_dsc("allocate_volume_time is " + str(allocate_volume_time)) test_util.test_dsc("allocate_nic_time is " + str(allocate_nic_time)) test_util.test_dsc("instantiate_res_pre_time is " + str(instantiate_res_pre_time)) test_util.test_dsc("create_on_hypervisor_time is " + str(create_on_hypervisor_time)) test_util.test_dsc("instantiate_res_post_time is " + str(instantiate_res_post_time)) if select_bs_time > 10: test_util.test_fail('select_bs_time is bigger than 10 milliseconds') if allocate_host_time > 190: test_util.test_fail( 'allocate_host_time is bigger than 190 milliseconds') if allocate_ps_time > 70: test_util.test_fail('allocate_ps_time is bigger than 70 milliseconds') if local_storage_allocate_capacity_time > 70: test_util.test_fail( 'local_storage_allocate_capacity_time is bigger than 70 milliseconds' ) if allocate_volume_time > 90: test_util.test_fail( 'allocate_volume_time is bigger than 90 milliseconds') if allocate_nic_time > 70: test_util.test_fail('allocate_nic_time is bigger than 70 milliseconds') if instantiate_res_pre_time > 1300: test_util.test_fail( 'instantiate_res_pre_time is bigger than 1300 milliseconds') if create_on_hypervisor_time > 2500: test_util.test_fail( 'create_on_hypervisor_time is bigger than 2500 milliseconds') if instantiate_res_post_time > 30: test_util.test_fail( 'instantiate_res_post_time is bigger than 30 milliseconds') vm.destroy() test_util.test_pass('Create VM and Check time for Each Stage Test Success')
def test(): def test_fail(msg): os.system('rm -f %s' % tmp_file) test_util.test_fail(msg) test_util.test_dsc( 'Create 2 CentOS6 vm to test zstack installation. Rabbitmq server is on different node with zstack management node. ZStack management node will also be started in 2 hosts.' ) image_name = os.environ.get('imageName_i_c6') vm1 = test_stub.create_vlan_vm(image_name) test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vlan_vm(image_name) test_obj_dict.add_vm(vm2) vm1.check() vm2.check() vm1_inv = vm1.get_vm() vm1_ip = vm1_inv.vmNics[0].ip vm2_inv = vm2.get_vm() vm2_ip = vm2_inv.vmNics[0].ip target_file = '/root/zstack-all-in-one.tgz' test_stub.prepare_test_env(vm1_inv, target_file) ssh_cmd1 = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm1_ip ssh_cmd2 = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm2_ip test_stub.only_install_zstack(ssh_cmd1, target_file, tmp_file) test_stub.copy_id_dsa(vm1_inv, ssh_cmd1, tmp_file) test_stub.copy_id_dsa_pub(vm2_inv) cmd = '%s "zstack-ctl install_db --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install db failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("install db on vm2 success") cmd = '%s "zstack-ctl deploydb --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack deploy db failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("deploy db on vm2 success") cmd = '%s "zstack-ctl install_rabbitmq --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install rabbitmq failed in vm:%s' % vm1_inv.uuid) test_util.test_dsc("install rabbitmq on vm2 success") cmd = '%s "rabbitmqctl add_user zstack zstack123; rabbitmqctl set_user_tags zstack administrator; rabbitmqctl change_password zstack zstack123; rabbitmqctl set_permissions -p / zstack \\\".*\\\" \\\".*\\\" \\\".*\\\""' % ssh_cmd2 process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack set rabbitmq username/password failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("set rabbitmq permissions on vm2 success") cmd = '%s "zstack-ctl configure CloudBus.rabbitmqUsername=zstack; zstack-ctl configure CloudBus.rabbitmqPassword=zstack123; zstack-ctl save_config"' % ssh_cmd1 process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('set rabbitmq config failed in vm:%s' % vm1_inv.uuid) test_util.test_dsc("config rabbitmq configure on vm1 success") cmd = '%s "zstack-ctl install_management_node --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install mn failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("install mn on vm2 success") cmd = '%s "zstack-ctl configure --duplicate-to-remote=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install mn failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("duplicate zstack mn configure to vm2 success") cmd = '%s "zstack-ctl configure management.server.ip=%s"' % (ssh_cmd2, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('set zstack-ctl config failed in vm:%s' % vm2_inv.uuid) cmd = '%s "zstack-ctl 
install_ui --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install ui failed in vm:%s' % vm2_inv.uuid) test_util.test_dsc("install ui to vm2 success") cmd = '%s "zstack-ctl install_ui --host=%s"' % (ssh_cmd1, vm1_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_fail('zstack install ui failed in vm:%s' % vm1_inv.uuid) test_util.test_dsc("install ui to vm1 success") cmd = '%s "zstack-ctl start_node"' % ssh_cmd1 process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: if 'no management-node-ready message received within' in open( tmp_file).read(): times = 30 cmd = '%s "zstack-ctl status"' % ssh_cmd1 while (times > 0): time.sleep(10) process_result = test_stub.execute_shell_in_process( cmd, tmp_file, 10, True) times -= 1 if process_result == 0: test_util.test_logger( "management node1 start after extra %d seconds" % (30 - times + 1) * 10) break else: test_fail('start node failed in vm:%s' % vm1_inv.uuid) cmd = '%s "zstack-ctl start_node --host=%s"' % (ssh_cmd1, vm2_ip) process_result = test_stub.execute_shell_in_process(cmd, tmp_file) if process_result != 0: if 'no management-node-ready message received within' in open( tmp_file).read(): times = 20 cmd = '%s "zstack-ctl status"' % ssh_cmd2 while (times > 0): time.sleep(10) process_result = test_stub.execute_shell_in_process( cmd, tmp_file, 10, True) times -= 1 if process_result == 0: test_util.test_logger( "management node2 start after extra %d seconds" % (20 - times + 1) * 10) break else: test_fail('start remote node failed in vm:%s' % vm1_inv.uuid) test_stub.check_installation(ssh_cmd1, tmp_file, vm1_inv) test_stub.check_installation(ssh_cmd2, tmp_file, vm2_inv) os.system('rm -f %s' % tmp_file) vm1.destroy() test_obj_dict.rm_vm(vm1) vm2.destroy() test_obj_dict.rm_vm(vm2) test_util.test_pass( 'ZStack multi nodes installation Test Success on 2 CentOS6.')
def test(): vm1 = test_stub.create_vr_vm('migrate_vm1', 'imageName_net', 'l3VlanNetwork3') test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vr_vm('migrate_vm2', 'imageName_net', 'l3VlanNetwork3') test_obj_dict.add_vm(vm2) vm3 = test_stub.create_vr_vm('migrate_vm3', 'imageName_net', 'l3VlanNetwork3') test_obj_dict.add_vm(vm3) vm1.check() vm2.check() vm3.check() test_util.test_dsc("Create security groups.") sg1 = test_stub.create_sg() test_obj_dict.add_sg(sg1.security_group.uuid) sg2 = test_stub.create_sg() test_obj_dict.add_sg(sg2.security_group.uuid) sg_vm = test_sg_vm_header.ZstackTestSgVm() sg_vm.check() l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid if not test_lib.lib_find_vr_by_vm(vm1.vm) or not test_lib.lib_find_vr_by_vm(vm1.vm)[0]: test_lib.lib_error_cleanup(test_obj_dict) test_util.test_skip("skip the test for no vr found in the env.") vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0] vm3_ip = test_lib.lib_get_vm_nic_by_l3(vm3.vm, l3_uuid).ip rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, vm3_ip) sg1.add_rule([rule1]) sg2.add_rule([rule1, rule2, rule3]) sg_vm.add_stub_vm(l3_uuid, vm3) nic_uuid1 = vm1.vm.vmNics[0].uuid nic_uuid2 = vm2.vm.vmNics[0].uuid vm1_nics = (nic_uuid1, vm1) vm2_nics = (nic_uuid2, vm2) test_util.test_dsc("Add nic to security group 1.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports) sg_vm.attach(sg1, [vm1_nics, vm2_nics]) sg_vm.attach(sg2, [vm1_nics, vm2_nics]) test_stub.migrate_vm_to_random_host(vm1) test_stub.migrate_vm_to_random_host(vm2) test_stub.migrate_vm_to_random_host(vm3) vm1.check() vm2.check() vm3.check() sg_vm.check() vm1.destroy() test_obj_dict.rm_vm(vm1) vm2.destroy() test_obj_dict.rm_vm(vm2) vm3.destroy() test_obj_dict.rm_vm(vm3) sg_vm.delete_sg(sg1) test_obj_dict.rm_sg(sg1.security_group.uuid) sg_vm.delete_sg(sg2) test_obj_dict.rm_sg(sg2.security_group.uuid) test_util.test_pass('Migrate SG VM Test Success')
def check_installation(ssh_cmd, tmp_file, vm_inv): cmd = '%s "/usr/bin/zstack-cli LogInByAccount accountName=admin password=password"' % ssh_cmd process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli login failed') vm_passwd = test_lib.lib_get_vm_password(vm_inv) vm_ip = vm_ip = vm_inv.vmNics[0].ip cmd = '%s "/usr/bin/zstack-cli AddSftpBackupStorage name=bs1 description=bs hostname=%s username=root password=%s url=/home/bs"' % ( ssh_cmd, vm_ip, vm_passwd) process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli create Backup Storage failed') cmd = '%s "/usr/bin/zstack-cli QuerySftpBackupStorage name=bs1"' % ssh_cmd process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Query Backup Storage failed') cmd = '%s "/usr/bin/zstack-cli QuerySftpBackupStorage name=bs1 fields=uuid" | grep uuid | awk \'{print $2}\'' % ssh_cmd (process_result, bs_uuid) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Query Backup Storage failed') cmd = '%s "/usr/bin/zstack-cli DeleteBackupStorage uuid=%s"' % ( ssh_cmd, bs_uuid.split('"')[1]) process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Delete Backup Storage failed') # check zone cmd = '%s "/usr/bin/zstack-cli CreateZone name=ZONE1"' % ssh_cmd process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Create Zone failed') cmd = '%s "/usr/bin/zstack-cli QueryZone name=ZONE1"' % ssh_cmd process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Query Zone failed') cmd = '%s "/usr/bin/zstack-cli QueryZone name=ZONE1 fields=uuid" | grep uuid | awk \'{print $2}\'' % ssh_cmd (process_result, zone_uuid) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Query Zone failed') cmd = '%s "/usr/bin/zstack-cli DeleteZone uuid=%s"' % ( ssh_cmd, zone_uuid.split('"')[1]) process_result = execute_shell_in_process(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-cli Delete Zone failed') # check item cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^version\' | awk \'{print $2}\'' % ssh_cmd (process_result, version_info) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get version failed') if '1.3' in version_info or '1.2' in version_info: cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^status\' | awk \'{print $2}\'' % ssh_cmd (process_result, status_info) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get status failed') if not 'Running' in status_info: test_util.test_dsc('zstack is not running, try to start zstack') cmd = '%s "/usr/bin/zstack-ctl start"' % ssh_cmd process_result = process_result = execute_shell_in_process( cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl start failed') time.sleep(5) cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^status\' | awk \'{print $2}\'' % ssh_cmd (process_result, status_info) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get status failed') if not 'Running' in status_info: test_util.test_fail( 'zstack is not running, start zstack failed') test_util.test_dsc('check zstack status, zstack 
is running') else: cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^MN status\' | awk \'{print $3}\'' % ssh_cmd (process_result, mn_status) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get MN status failed') if not 'Running' in mn_status: test_util.test_dsc( 'management node is not running, try to start management node') cmd = '%s "/usr/bin/zstack-ctl start_node"' % ssh_cmd process_result = process_result = execute_shell_in_process( cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl start_node failed') time.sleep(5) cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^MN status\' | awk \'{print $3}\'' % ssh_cmd (process_result, mn_status) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get MN status failed') if not 'Running' in mn_status: test_util.test_fail( 'management node is not running, start management node failed' ) test_util.test_dsc('check MN, MN is running') cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^UI status\' | awk \'{print $3}\'' % ssh_cmd (process_result, ui_status) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get UI status failed') if not 'Running' in ui_status: test_util.test_dsc('UI is not running, try to start UI') cmd = '%s "/usr/bin/zstack-ctl start_ui"' % ssh_cmd process_result = process_result = execute_shell_in_process( cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl start_ui failed') time.sleep(5) cmd = '%s "/usr/bin/zstack-ctl status" | grep \'^MN status\' | awk \'{print $3}\'' % ssh_cmd (process_result, mn_status) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl get MN status failed') if not 'Running' in mn_status: test_util.test_fail('UI is not running, start UI failed') test_util.test_dsc('check UI, UI is running') cmd = '%s "/usr/bin/zstack-ctl status" | grep ^ZSTACK_HOME | awk \'{print $2}\'' % ssh_cmd (process_result, zstack_home) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl status get ZSTACK_HOME failed') zstack_home = zstack_home[:-1] cmd = '%s "[ -d " %s " ] && echo yes || echo no" ' % (ssh_cmd, zstack_home) (process_result, dir_exist) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('check ZSTACK_HOME failed') dir_exist = dir_exist[:-1] if dir_exist == 'no': test_util.test_fail('there is no ZSTACK_HOME') cmd = '%s "/usr/bin/zstack-ctl status" | grep ^zstack.properties | awk \'{print $2}\'' % ssh_cmd (process_result, properties_file) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl status get zstack.properties failed') properties_file = properties_file[:-1] cmd = '%s "[ -f " %s " ] && echo yes || echo no" ' % (ssh_cmd, properties_file) (process_result, file_exist) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('check zstack.properties failed') file_exist = file_exist[:-1] if file_exist == 'no': test_util.test_fail('there is no zstack.properties') cmd = '%s "/usr/bin/zstack-ctl status" | grep ^log4j2.xml | awk \'{print $2}\'' % ssh_cmd (process_result, properties_file) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl status get log4j2.xml failed') properties_file = properties_file[:-1] cmd = '%s "[ -f " %s " ] && echo yes || 
echo no" ' % (ssh_cmd, properties_file) (process_result, file_exist) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('check log4j2.xml failed') file_exist = file_exist[:-1] if file_exist == 'no': test_util.test_fail('there is no log4j2.xml') cmd = '%s "/usr/bin/zstack-ctl status" | grep ^\'PID file\' | awk \'{print $3}\'' % ssh_cmd (process_result, properties_file) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl status get PID file failed') properties_file = properties_file[:-1] cmd = '%s "[ -f " %s " ] && echo yes || echo no" ' % (ssh_cmd, properties_file) (process_result, file_exist) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('check PID file failed') file_exist = file_exist[:-1] if file_exist == 'no': test_util.test_fail('there is no PID file') cmd = '%s "/usr/bin/zstack-ctl status" | grep ^\'log file\' | awk \'{print $3}\'' % ssh_cmd (process_result, properties_file) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('zstack-ctl status get log file failed') properties_file = properties_file[:-1] cmd = '%s "[ -f " %s " ] && echo yes || echo no" ' % (ssh_cmd, properties_file) (process_result, file_exist) = execute_shell_in_process_stdout(cmd, tmp_file) if process_result != 0: test_util.test_fail('check log file failed') file_exist = file_exist[:-1] if file_exist == 'no': test_util.test_fail('there is no log file')
def add_zone_resource(deploy_config, zone_name): session_uuid = acc_ops.login_as_admin() try: test_util.test_dsc('-------add zone operation-------') dep_ops.add_zone(deploy_config, session_uuid, zone_name=zone_name) test_util.test_dsc('-------add l2 operation-------') dep_ops.add_l2_network(deploy_config, session_uuid, \ zone_name = zone_name) test_util.test_dsc('-------add primary storage operation-------') dep_ops.add_primary_storage(deploy_config, session_uuid, \ zone_name = zone_name) test_util.test_dsc('-------add cluster operation-------') dep_ops.add_cluster(deploy_config, session_uuid, \ zone_name = zone_name) test_util.test_dsc('-------add host operation-------') dep_ops.add_host(deploy_config, session_uuid, \ zone_name = zone_name) test_util.test_dsc('-------add l3 operation-------') dep_ops.add_l3_network(deploy_config, session_uuid, \ zone_name = zone_name) test_util.test_dsc( '-------add virtual router offering operation-------') dep_ops.add_virtual_router(deploy_config, session_uuid, \ zone_name = zone_name) zone = res_ops.get_resource(res_ops.ZONE, session_uuid, \ name = zone_name)[0] except Exception as e: test_util.test_logger( '[Error] zstack deployment meets exception when adding zone resource.' ) traceback.print_exc(file=sys.stdout) raise e finally: acc_ops.logout(session_uuid) test_util.action_logger('Completed adding zone resources for [uuid:] %s' \ % zone.uuid)
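# A minimal, data-driven variant of add_zone_resource: the per-zone deployment
# steps above all share the (deploy_config, session_uuid, zone_name) signature,
# so they can be driven from a list. This is only a sketch; it assumes dep_ops,
# acc_ops, res_ops and test_util are imported as in the surrounding tests.
def add_zone_resource_steps(deploy_config, zone_name):
    steps = [
        ('zone', dep_ops.add_zone),
        ('l2 network', dep_ops.add_l2_network),
        ('primary storage', dep_ops.add_primary_storage),
        ('cluster', dep_ops.add_cluster),
        ('host', dep_ops.add_host),
        ('l3 network', dep_ops.add_l3_network),
        ('virtual router offering', dep_ops.add_virtual_router),
    ]
    session_uuid = acc_ops.login_as_admin()
    try:
        for name, add_func in steps:
            test_util.test_dsc('-------add %s operation-------' % name)
            add_func(deploy_config, session_uuid, zone_name=zone_name)
        return res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)[0]
    finally:
        acc_ops.logout(session_uuid)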
def test(): vm = test_stub.create_user_vlan_vm() test_obj_dict.add_vm(vm) vm.check() vm_inv = vm.get_vm() vm_ip = vm_inv.vmNics[0].ip test_util.test_dsc( 'create test-file-1, test-file-2, test-file-3 and create snapshot1') num = 1 while num <= 3: cmd = 'touch /root/test-file-%s' % num rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('Fail to create test-file-%s in VM' % num) num = num + 1 root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm()) snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid) snapshots.set_utility_vm(vm) vm.check() snapshots.create_snapshot('create_root_snapshot1') test_util.test_dsc( 'delete test-file-1, create test-file-4, test-file-5 and create snapshot2' ) cmd = 'rm /root/test-file-1 || echo y' rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('Fail to delete test-file-1 in VM') num = 4 while num <= 5: cmd = 'touch /root/test-file-%s' % num rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('Fail to create test-file-%s in VM' % num) num = num + 1 root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm()) snapshots2 = test_obj_dict.get_volume_snapshot(root_volume_uuid) snapshots2.set_utility_vm(vm) vm.check() snapshots2.create_snapshot('create_root_snapshot2') test_util.test_dsc( 'delete test-file-2, test-file-4, create test-file-6 and create snapshot3' ) num_arr = [2, 4] for i in num_arr: cmd = 'rm /root/test-file-%s || echo y' % i rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('Fail to delete test-file-%s in VM' % i) cmd = 'touch /root/test-file-6' rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('Fail to create test-file-6 in VM') root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm()) snapshots3 = test_obj_dict.get_volume_snapshot(root_volume_uuid) snapshots3.set_utility_vm(vm) vm.check() snapshots3.create_snapshot('create_root_snapshot3') test_util.test_dsc('VM reinit') vm.stop() vm.reinit() vm.update() vm.check() vm.start() num = 1 while num <= 6: cmd = '[ -e /root/test-file-%s ] && echo yes || echo no' % num rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('test-file-%s should not exist' % num) num = num + 1 test_util.test_dsc('VM return to snapshot3') vm.stop() snapshots3.use_snapshot(snapshots.get_current_snapshot()) vm.start() vm.check() num_arr = [1, 2, 4] for i in num_arr: cmd = '[ -e /root/test-file-%s ] && echo yes || echo no' % i rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('test-file-%s should not exist' % i) num_arr = [3, 5, 6] for i in num_arr: cmd = '[ -e /root/test-file-%s ] && echo yes || echo no' % i rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180) if rsp == False: test_util.test_fail('test-file-%s should exist' % i) vm.destroy() test_util.test_pass('Re-init VM Test Success')
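# The snapshot/reinit test above repeats the same touch / rm / [ -e ] ssh pattern
# for every marker file. A small sketch of the helpers it is effectively using,
# assuming test_lib.lib_execute_ssh_cmd(ip, user, password, cmd, timeout) behaves
# as called above; the helper names are hypothetical.
def create_marker_file(vm_ip, num, timeout=180):
    cmd = 'touch /root/test-file-%s' % num
    return test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, timeout) != False

def marker_file_exists(vm_ip, num, timeout=180):
    cmd = '[ -e /root/test-file-%s ] && echo yes || echo no' % num
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, timeout)
    # lib_execute_ssh_cmd is assumed to return the command output, or False on ssh failure
    return rsp != False and 'yes' in rsp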
def test(): global new_offering_uuid test_util.test_dsc('Test VM 2nic outbound & inbound bandwidth QoS by 1MB') #unit is KB net_bandwidth = 2 * 1024 vm1 = test_stub.create_vm(vm_name='vm_net_inbound_outbound_qos', l3_name=os.environ.get('l3PublicNetworkName')) l3_net_uuid2 = test_lib.lib_get_l3_by_name( os.environ.get('l3VlanNetworkName1')).uuid test_obj_dict.add_vm(vm1) vm1.check() vm1_inv = vm1.get_vm() test_stub.make_ssh_no_password(vm1_inv) vm1_ip = vm1_inv.vmNics[0].ip vm2 = test_stub.create_vm(vm_name='vm_net_inbound_outbound_qos', l3_name=os.environ.get('l3PublicNetworkName')) test_obj_dict.add_vm(vm2) vm2.check() vm2_inv = vm2.get_vm() vm2_ip = vm2_inv.vmNics[0].ip test_stub.make_ssh_no_password(vm2_inv) test_stub.copy_key_file(vm1_inv) test_stub.copy_key_file(vm2_inv) test_stub.create_test_file(vm1_inv, net_bandwidth) test_stub.create_test_file(vm2_inv, net_bandwidth) vm1.add_nic(l3_net_uuid2) vm2.add_nic(l3_net_uuid2) # Set a single nic to smaller bandwidth vm_nic = test_lib.lib_get_vm_nic_by_l3(vm1.vm, l3_net_uuid2) vm_ops.set_vm_nic_qos(vm_nic.uuid, outboundBandwidth=net_bandwidth * 8 * 1024 / 2) vm1.stop() vm2.stop() vm1.start() vm2.start() ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null' cmd = "pkill dhclient" os.system("%s %s %s" % (ssh_cmd, vm1_ip, cmd)) os.system("%s %s %s" % (ssh_cmd, vm2_ip, cmd)) cmd = "dhclient eth0 eth1" os.system("%s %s %s" % (ssh_cmd, vm1_ip, cmd)) os.system("%s %s %s" % (ssh_cmd, vm2_ip, cmd)) cmd = '%s %s "ping %s -c 10"' % (ssh_cmd, vm1_ip, test_lib.lib_get_vm_nic_by_l3( vm2.get_vm(), l3_net_uuid2).ip) ping_ret = 1 while ping_ret: ping_ret = os.system(cmd) test_stub.test_scp_outbound_speed( vm1_ip, test_lib.lib_get_vm_nic_by_l3(vm2.get_vm(), l3_net_uuid2).ip, net_bandwidth / 2) #vm_ops.delete_instance_offering(new_offering_uuid) test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('VM Network 2nd nic QoS Test Pass')
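# The outboundBandwidth value passed to vm_ops.set_vm_nic_qos() above is derived
# from net_bandwidth (a KB figure, per the "#unit is KB" comment) as
# net_bandwidth * 8 * 1024 / 2, i.e. the full rate expressed in bits per second
# and then halved so the second nic gets half the bandwidth. A minimal sketch of
# that conversion, assuming the bits-per-second interpretation is correct:
def kb_to_bits_per_second(kb_value, fraction=1.0):
    '''Convert a KB/s figure to bit/s, optionally scaled (e.g. fraction=0.5 for half rate).'''
    return int(kb_value * 1024 * 8 * fraction)

# e.g. kb_to_bits_per_second(2 * 1024, fraction=0.5) == 2 * 1024 * 8 * 1024 / 2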
def test(): global mevoco1_ip global mevoco2_ip global ipsec1 global ipsec2 mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] mevoco2_ip = os.environ['secondZStackMnIp'] test_util.test_dsc('Create test vm in mevoco1') vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1')) test_obj_dict1.add_vm(vm1) vm1.check() pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0] l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid vip1 = test_stub.create_vip('ipsec1_vip', l3_uuid1) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Create test vm in mevoco2') vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName')) test_obj_dict2.add_vm(vm2) vm2.check() pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0] l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Create ipsec in mevoco1') ipsec1 = ipsec_ops.create_ipsec_connection( 'ipsec1', pri_l3_uuid1, vip2.get_vip().ip, '123456', vip1.get_vip().uuid, [os.environ['secondZStackCidrs']], policy_auth_algorithm="sha384") os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Create ipsec in mevoco2') ipsec2 = ipsec_ops.create_ipsec_connection( 'ipsec2', pri_l3_uuid2, vip1.get_vip().ip, '123456', vip2.get_vip().uuid, [os.environ['firstZStackCidrs']], policy_auth_algorithm="sha384") os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip): test_util.test_fail( 'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip)) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip): test_util.test_fail( 'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco2_ip, mevoco1_ip)) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip ipsec_ops.delete_ipsec_connection(ipsec1.uuid) if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True): test_util.test_fail( 'vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip)) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True): test_util.test_fail( 'vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip)) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_lib.lib_error_cleanup(test_obj_dict1) vip1.delete() test_obj_dict1.rm_vip(vip1) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip ipsec_ops.delete_ipsec_connection(ipsec2.uuid) test_lib.lib_error_cleanup(test_obj_dict2) vip2.delete() test_obj_dict2.rm_vip(vip2) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_pass('Create Ipsec Success')
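# The cross-MN IPsec test above has to flip os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
# back and forth before every operation. A small, generic sketch of doing that with a
# context manager (pure stdlib, not part of the test framework):
import os
from contextlib import contextmanager

@contextmanager
def management_node(mn_ip, env_key='ZSTACK_BUILT_IN_HTTP_SERVER_IP'):
    '''Temporarily point the test client at another management node.'''
    previous = os.environ.get(env_key)
    os.environ[env_key] = mn_ip
    try:
        yield mn_ip
    finally:
        if previous is None:
            del os.environ[env_key]
        else:
            os.environ[env_key] = previous

# Usage sketch:
# with management_node(mevoco2_ip):
#     vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))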
def test(): test_util.test_dsc(''' Will do random tests on Security Group operations, including SG create/delete, rule add/remove, vm nics attach/detach. If the max of 4 coexisting running VMs is reached, testing will succeed and quit. Volume actions and Image actions are removed in this robot test. VM resources: Since SG testing will create target test vm, there might be max 12 running VMs: 4 VR VMs, 4 SG target test VMs and 4 test VMs. ''') target_running_vm = 4 target_l3s = test_lib.lib_get_limited_l3_network(2, 5) vr_num = 0 for target_l3 in target_l3s: vr_l3_uuid = target_l3.uuid vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid) temp_vm = None if not vrs: #create temp_vm for getting its vr for test pf_vm portforwarding vm_create_option = test_util.VmOption() vm_create_option.set_l3_uuids([vr_l3_uuid]) temp_vm = test_lib.lib_create_vm(vm_create_option) test_dict.add_vm(temp_vm) #we only need temp_vm's VR temp_vm.destroy() test_dict.rm_vm(temp_vm) vr_num += 1 #VIP testing needs 3 VRs if vr_num > 2: break utility_vm_create_option = test_util.VmOption() utility_vm_create_option.set_image_uuid( test_lib.lib_get_image_by_name( img_name=os.environ.get('imageName_net')).uuid) l3_uuid = test_lib.lib_get_l3_by_name( os.environ.get('l3VlanNetworkName1')).uuid utility_vm_create_option.set_l3_uuids([l3_uuid]) utility_vm = test_lib.lib_create_vm(utility_vm_create_option) test_dict.add_utility_vm(utility_vm) utility_vm.check() vm_create_option = test_util.VmOption() #image has to use network test image, as it needs to do port checking vm_create_option.set_image_uuid( test_lib.lib_get_image_by_name( img_name=os.environ.get('imageName_net')).uuid) #Add 2 times sg_rule_operations. priority_actions = [test_state.TestAction.sg_rule_operations] * 2 vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid) public_l3 = test_lib.lib_find_vr_pub_nic(vrs[0]).l3NetworkUuid robot_test_obj = test_util.Robot_Test_Object() robot_test_obj.set_test_dict(test_dict) robot_test_obj.set_utility_vm(utility_vm) robot_test_obj.set_vm_creation_option(vm_create_option) priority_action_obj = action_select.ActionPriority() priority_action_obj.add_priority_action_list(priority_actions) robot_test_obj.set_priority_actions(priority_action_obj) robot_test_obj.set_public_l3(public_l3) test_util.test_dsc( 'Random Test Begin. Test target: 4 coexisting running VMs (not including VR and SG target test VMs).' ) rounds = 1 while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm: test_util.test_dsc('New round %s starts: random operation pickup.' % rounds) test_lib.lib_vm_random_operation(robot_test_obj) test_util.test_dsc('Round %s finished. Begin status checking.' % rounds) rounds += 1 test_lib.lib_robot_status_check(test_dict) test_util.test_dsc('Reached test pass exit criteria.') test_lib.lib_robot_cleanup(test_dict) test_util.test_pass('Create random VM Test Success')
def test(): test_util.test_dsc("create vpc vrouter and attach vpc l3 to vpc") for vpc_name in vpc_name_list: vr_list.append(test_stub.create_vpc_vrouter(vpc_name)) for vr, l3_list in izip(vr_list, vpc_l3_list): test_stub.attach_l3_to_vpc_vr(vr, l3_list) vr1, vr2 = vr_list test_util.test_dsc("create two vm, vm1 in l3 {}, vm2 in l3 {}".format( VLAN1_NAME, VXLAN1_NAME)) vm1 = test_stub.create_vm_with_random_offering( vm_name='vpc_vm_{}'.format(VLAN1_NAME), l3_name=VLAN1_NAME) test_obj_dict.add_vm(vm1) vm1.check() vm2 = test_stub.create_vm_with_random_offering( vm_name='vpc_vm_{}'.format(VXLAN1_NAME), l3_name=VXLAN1_NAME) test_obj_dict.add_vm(vm2) vm2.check() cond = res_ops.gen_query_conditions('name', '=', os.environ.get(SECOND_PUB)) second_pub_l3 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0] test_util.test_dsc("Create vroute route for vpc1") cond = res_ops.gen_query_conditions('name', '=', os.environ.get(VXLAN1_NAME)) vpc2_l3 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0] vpc2_l3_cdir = vpc2_l3.ipRanges[0].networkCidr vpc2_second_pub_ip = [ nic.ip for nic in vr2.inv.vmNics if nic.l3NetworkUuid == second_pub_l3.uuid ][0] route_table1 = net_ops.create_vrouter_route_table('vpc1') route_entry1 = net_ops.add_vrouter_route_entry(route_table1.uuid, vpc2_l3_cdir, vpc2_second_pub_ip) net_ops.attach_vrouter_route_table_to_vrouter(route_table1.uuid, vr1.inv.uuid) test_util.test_dsc("Create vroute route for vpc2") cond = res_ops.gen_query_conditions('name', '=', os.environ.get(VLAN1_NAME)) vpc1_l3 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0] vpc1_l3_cdir = vpc1_l3.ipRanges[0].networkCidr vpc1_second_pub_ip = [ nic.ip for nic in vr1.inv.vmNics if nic.l3NetworkUuid == second_pub_l3.uuid ][0] route_table2 = net_ops.create_vrouter_route_table('vpc2') route_entry1 = net_ops.add_vrouter_route_entry(route_table2.uuid, vpc1_l3_cdir, vpc1_second_pub_ip) net_ops.attach_vrouter_route_table_to_vrouter(route_table2.uuid, vr2.inv.uuid) vm1_inv, vm2_inv = [vm.get_vm() for vm in (vm1, vm2)] test_lib.lib_check_ping(vm1_inv, vm2_inv.vmNics[0].ip) test_lib.lib_check_ping(vm2_inv, vm1_inv.vmNics[0].ip) test_lib.lib_check_ports_in_a_command(vm1_inv, vm1_inv.vmNics[0].ip, vm2_inv.vmNics[0].ip, ["22"], [], vm2_inv) test_lib.lib_check_ports_in_a_command(vm2_inv, vm2_inv.vmNics[0].ip, vm1_inv.vmNics[0].ip, ["22"], [], vm1_inv) net_ops.detach_vrouter_route_table_from_vrouter(route_table1.uuid, vr1.inv.uuid) net_ops.detach_vrouter_route_table_from_vrouter(route_table2.uuid, vr2.inv.uuid) with test_lib.expected_failure('Check two vm pingable', Exception): test_lib.lib_check_ping(vm1_inv, vm2_inv.vmNics[0].ip) with test_lib.expected_failure('Check two vm pingable', Exception): test_lib.lib_check_ping(vm2_inv, vm1_inv.vmNics[0].ip) test_lib.lib_check_ports_in_a_command(vm1_inv, vm1_inv.vmNics[0].ip, vm2_inv.vmNics[0].ip, [], ["22"], vm2_inv) test_lib.lib_check_ports_in_a_command(vm2_inv, vm2_inv.vmNics[0].ip, vm1_inv.vmNics[0].ip, [], ["22"], vm1_inv) test_lib.lib_error_cleanup(test_obj_dict) test_stub.remove_all_vpc_vrouter()
def test(): test_util.test_dsc(''' Will do random tests for VIP operations, including VIP create/delete, PF create/attach/detach/remove, EIP create/attach/detach/remove. VM operations will also be tested. If the max of 4 coexisting running VMs is reached, testing will succeed and quit. SG actions, Volume actions and Image actions are removed in this robot test. VM resources: VIP testing needs at least 3 running VRs. ''') target_running_vm = 4 target_l3s = test_lib.lib_get_limited_l3_network(2, 5) vr_num = 0 for target_l3 in target_l3s: vr_l3_uuid = target_l3.uuid vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid) temp_vm = None if not vrs: #create temp_vm for getting its vr for test pf_vm portforwarding vm_create_option = test_util.VmOption() vm_create_option.set_l3_uuids([vr_l3_uuid]) temp_vm = test_lib.lib_create_vm(vm_create_option) test_dict.add_vm(temp_vm) #we only need temp_vm's VR temp_vm.destroy() test_dict.rm_vm(temp_vm) vr_num += 1 #VIP testing needs 3 VRs if vr_num > 2: break vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid) public_l3 = test_lib.lib_find_vr_pub_nic(vrs[0]).l3NetworkUuid vm_create_option = test_util.VmOption() #image has to use virtual router image, as it needs to do port checking vm_create_option.set_image_uuid( test_lib.lib_get_image_by_name( img_name=os.environ.get('imageName_net')).uuid) priority_actions = test_state.TestAction.vip_actions * 4 test_util.test_dsc( 'Random Test Begin. Test target: 4 coexisting running VMs (not including VR and SG target test VMs).' ) robot_test_obj = test_util.Robot_Test_Object() robot_test_obj.set_test_dict(test_dict) robot_test_obj.set_vm_creation_option(vm_create_option) priority_action_obj = action_select.ActionPriority() priority_action_obj.add_priority_action_list(priority_actions) robot_test_obj.set_priority_actions(priority_action_obj) robot_test_obj.set_exclusive_actions_list(\ test_state.TestAction.volume_actions \ + test_state.TestAction.image_actions \ + test_state.TestAction.sg_actions \ + test_state.TestAction.snapshot_actions) robot_test_obj.set_public_l3(public_l3) robot_test_obj.set_random_type(action_select.weight_fair_strategy) rounds = 1 current_time = time.time() timeout_time = current_time + 3600 + 3600 while time.time() <= timeout_time: #while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm: test_util.test_dsc('New round %s starts: random operation pickup.' % rounds) test_lib.lib_vm_random_operation(robot_test_obj) test_util.test_dsc( '===============Round %s finished. Begin status checking.================' % rounds) rounds += 1 test_lib.lib_robot_status_check(test_dict) test_util.test_dsc('Reached test pass exit criteria.') test_lib.lib_robot_cleanup(test_dict) test_util.test_pass('Create random VM Test Success')
def test(): ''' Test image requirements: 1. has nc to check the network port 2. has "nc" to open any port 3. it doesn't include a default firewall. The VR image is a good candidate to be the guest image. ''' test_util.test_dsc( "Create 3 VMs with vlan VR L3 network and using VR image.") vm1 = test_stub.create_sg_vm() test_obj_dict.add_vm(vm1) vm2 = test_stub.create_sg_vm() test_obj_dict.add_vm(vm2) vm3 = test_stub.create_sg_vm() test_obj_dict.add_vm(vm3) vm1.check() vm2.check() vm3.check() test_util.test_dsc("Create security groups.") sg1 = test_stub.create_sg() test_obj_dict.add_sg(sg1.security_group.uuid) sg2 = test_stub.create_sg() test_obj_dict.add_sg(sg2.security_group.uuid) sg3 = test_stub.create_sg() test_obj_dict.add_sg(sg3.security_group.uuid) sg_vm = test_sg_vm_header.ZstackTestSgVm() sg_vm.check() l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0] vm3_ip = test_lib.lib_get_vm_nic_by_l3(vm3.vm, l3_uuid).ip rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule4 = test_lib.lib_gen_sg_rule(Port.rule4_ports, inventory.TCP, inventory.INGRESS, vm3_ip) rule5 = test_lib.lib_gen_sg_rule(Port.rule5_ports, inventory.TCP, inventory.INGRESS, vm3_ip) sg1.add_rule([rule1]) sg2.add_rule([rule1, rule2, rule3]) sg3.add_rule([rule3, rule4, rule5]) sg_vm.add_stub_vm(l3_uuid, vm3) sg_vm.check() nic_uuid1 = vm1.vm.vmNics[0].uuid nic_uuid2 = vm2.vm.vmNics[0].uuid vm1_nics = (nic_uuid1, vm1) vm2_nics = (nic_uuid2, vm2) #vm_nics = [nic_uuid1, nic_uuid2] #test_util.test_dsc("Create SG rule0: allow connection from vr to port 0~100. 
This is for enabling ssh connection from vr") #rule0 = inventory.SecurityGroupRuleAO() #rule0.allowedCidr = '%s/32' % vr_internal_ip #rule0.protocol = inventory.TCP #rule0.startPort = 0 #rule0.endPort = 100 #rule0.type = inventory.INGRESS #test_stub.lib_add_sg_rules(sg1.uuid, [rule0, rule1]) test_util.test_dsc("Add nic to security group 1.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports) sg_vm.attach(sg1, [vm1_nics, vm2_nics]) sg_vm.check() test_util.test_dsc("Remove nic from security group 1.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.target_ports) sg_vm.detach(sg1, nic_uuid1) sg_vm.detach(sg1, nic_uuid2) sg_vm.check() test_util.test_dsc("Remove rule1 from security group 1.") sg1.delete_rule([rule1]) sg_vm.check() test_util.test_dsc("Add rule1, rule2, rule3 to security group 1.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.target_ports) sg2.add_rule([rule1, rule2, rule3]) sg_vm.check() test_util.test_dsc("Add nic to security group 1 again.") tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports test_util.test_dsc("Allowed ingress ports: %s" % tmp_allowed_ports) sg_vm.attach(sg1, [vm1_nics, vm2_nics]) sg_vm.check() test_util.test_dsc("Remove rule2/3 from security group 1.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports) sg2.delete_rule([rule2, rule3]) sg_vm.check() #add sg2 to vm1 test_util.test_dsc("Add vm1 nic to security group 2.") tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports test_util.test_dsc("Allowed ingress ports for vm1 from vm3: %s" % tmp_allowed_ports) test_util.test_dsc("Allowed ingress ports for vm1 from vm3: %s" % test_stub.rule1_ports) test_util.test_dsc("Allowed ingress ports for vm2: %s" % test_stub.rule1_ports) sg_vm.attach(sg2, [vm1_nics]) sg_vm.check() #add sg2 to vm2 test_util.test_dsc("Add vm2 nic to security group 2.") tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports test_util.test_dsc("Allowed ingress ports for vm1/vm2: %s" % tmp_allowed_ports) sg_vm.attach(sg2, [vm2_nics]) sg_vm.check() #add sg3 test_util.test_dsc("Add vm1/vm2 nics to security group 3.") tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports + test_stub.rule4_ports + test_stub.rule5_ports test_util.test_dsc("Allowed ingress ports: %s" % tmp_allowed_ports) sg_vm.attach(sg3, [vm1_nics, vm2_nics]) sg_vm.check() #remove sg2 test_util.test_dsc("Remove security group 2 for nic.") tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule3_ports + test_stub.rule4_ports + test_stub.rule5_ports test_util.test_dsc("Allowed ingress ports: %s" % tmp_allowed_ports) sg_vm.detach(sg2, nic_uuid1) sg_vm.detach(sg2, nic_uuid2) sg_vm.check() #delete sg3 test_util.test_dsc("Delete security group 3.") test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports) sg_vm.delete_sg(sg3) test_obj_dict.rm_sg(sg3.security_group.uuid) sg_vm.check() #Cleanup sg_vm.delete_sg(sg2) test_obj_dict.rm_sg(sg2.security_group.uuid) sg_vm.delete_sg(sg1) test_obj_dict.rm_sg(sg1.security_group.uuid) sg_vm.check() vm1.destroy() test_obj_dict.rm_vm(vm1) vm2.destroy() test_obj_dict.rm_vm(vm2) vm3.destroy() test_obj_dict.rm_vm(vm3) test_util.test_pass( 'Security Group Vlan VirtualRouter 2 VMs Group Ingress Test Success')
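# The security-group test above builds every rule with the same
# test_lib.lib_gen_sg_rule(ports, protocol, direction, allowed_ip) call. A short
# sketch of generating the whole rule set from the port groups in one place
# (test_lib, inventory and Port are assumed to be imported as in the test):
def gen_ingress_tcp_rules(allowed_ip):
    '''Return one ingress TCP rule per Port.ruleN_ports group, keyed by group name.'''
    port_groups = {
        'rule1': Port.rule1_ports,
        'rule2': Port.rule2_ports,
        'rule3': Port.rule3_ports,
        'rule4': Port.rule4_ports,
        'rule5': Port.rule5_ports,
    }
    return dict((name, test_lib.lib_gen_sg_rule(ports, inventory.TCP, inventory.INGRESS, allowed_ip))
                for name, ports in port_groups.items())

# e.g. rules = gen_ingress_tcp_rules(vm3_ip); sg1.add_rule([rules['rule1']])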
def test(): test_util.test_dsc('Create test vm as utility vm') vm = test_stub.create_vr_vm('migrate_vm_with_snapshot', 'imageName_net', 'l3VlanNetwork2') test_obj_dict.add_vm(vm) vm.check() root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm()) test_util.test_dsc('create snapshot and check') snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid) snapshots.set_utility_vm(vm) snapshots.create_snapshot('create_snapshot1') snapshots.check() test_util.test_dsc('migrate vm and check snapshot') test_stub.migrate_vm_to_random_host(vm) vm.check() snapshots.check() snapshot1 = snapshots.get_current_snapshot() snapshots.create_snapshot('create_snapshot2') snapshots.check() snapshots.create_snapshot('create_snapshot3') snapshots.check() snapshot3 = snapshots.get_current_snapshot() vm.stop() snapshots.use_snapshot(snapshot1) vm.start() snapshots.create_snapshot('create_snapshot1.1.1') snapshots.check() snapshots.create_snapshot('create_snapshot1.1.2') snapshots.check() vm.stop() snapshots.use_snapshot(snapshot1) vm.start() snapshots.create_snapshot('create_snapshot1.2.1') snapshots.check() snapshot1_2_1 = snapshots.get_current_snapshot() snapshots.create_snapshot('create_snapshot1.2.2') snapshots.check() test_util.test_dsc('migrate vm and check snapshot') test_stub.migrate_vm_to_random_host(vm) vm.check() snapshots.check() vm.stop() snapshots.use_snapshot(snapshot3) vm.start() snapshots.check() snapshots.create_snapshot('create_snapshot4') snapshots.check() test_util.test_dsc('migrate vm and check snapshot') test_stub.migrate_vm_to_random_host(vm) vm.check() snapshots.check() test_util.test_dsc('Delete snapshot and check') snapshots.delete_snapshot(snapshot3) snapshots.check() test_util.test_dsc('migrate vm and check snapshot') test_stub.migrate_vm_to_random_host(vm) vm.check() snapshots.check() snapshots.delete_snapshot(snapshot1_2_1) snapshots.check() snapshots.delete() test_obj_dict.rm_volume_snapshot(snapshots) vm.destroy() test_util.test_pass('Create Snapshot with VM migration test Success')
def test(): global vm, exist_users test_util.test_dsc('cloned vm change password test') vm = test_stub.create_vm(vm_name='1st-created-vm-u16', image_name="imageName_i_u16") test_obj_dict.add_vm(vm) vm.check() force_vm_auto_boot(vm) test_util.test_logger("change vm password for initial created vm") vm_ops.change_vm_password(vm.get_vm().uuid, "root", "password", skip_stopped_vm=None, session_uuid=None) backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm) for bs in backup_storage_list: if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE: break #if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE: # break #if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE: # break else: vm.destroy() test_util.test_skip('Not find image store type backup storage.') for (usr, passwd) in zip(users, passwds): if usr not in exist_users: test_util.test_logger("find new account: <%s:%s>" % (usr, passwd)) test_stub.create_user_in_vm(vm.get_vm(), usr, passwd) exist_users.append(usr) #new vm->cloned new_vm1/new_vm2 test_util.test_logger("1st clone") new_vms = vm.clone(vm_names) if len(new_vms) != len(vm_names): test_util.test_fail( 'only %s VMs have been cloned, which is less than required: %s' % (len(new_vms), vm_names)) for new_vm in new_vms: new_vm.update() #new_vm.check() test_obj_dict.add_vm(new_vm) #When vm is running: test_util.test_logger( "vm running && change 1st cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd)) vm_ops.change_vm_password(new_vm.get_vm().uuid, usr, passwd, skip_stopped_vm=None, session_uuid=None) if not test_lib.lib_check_login_in_vm(new_vm.get_vm(), usr, passwd): test_util.test_fail( "check login cloned vm with user:%s password: %s failed", usr, passwd) #When vm is stopped: #new_vm.stop() test_util.test_logger( "vm stopped && change 1st cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd)) vm_ops.change_vm_password(new_vm.get_vm().uuid, "root", test_stub.original_root_password) #new_vm.start() new_vm.check() #test use the cloned vm change password to clone new vm and then change password test_util.test_logger("2nd cloned") in_new_vms = new_vm.clone(in_vm_names) new_vm.destroy() new_vm.check() new_vm.expunge() new_vm.check() for in_new_vm in in_new_vms: in_new_vm.update() test_obj_dict.add_vm(in_new_vm) test_util.test_logger( "vm running && change 2nd cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd)) vm_ops.change_vm_password(in_new_vm.get_vm().uuid, usr, passwd, skip_stopped_vm=None, session_uuid=None) if not test_lib.lib_check_login_in_vm(in_new_vm.get_vm(), usr, passwd): test_util.test_fail( "check login cloned in_vm with user:%s password: %s failed", usr, passwd) #When vm is stopped: #in_new_vm.stop() test_util.test_logger( "vm stopped && change 2nd cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd)) vm_ops.change_vm_password(in_new_vm.get_vm().uuid, "root", test_stub.original_root_password) #in_new_vm.start() in_new_vm.check() in_new_vm.destroy() in_new_vm.check() in_new_vm.expunge() in_new_vm.check() vm.destroy() vm.check() vm.expunge() vm.check() test_util.test_pass('Set password when VM is creating is successful.')
def test(): global test_obj_dict global ps_uuid global host_uuid global vr_uuid test_util.test_dsc('Create test vm and check') l3_1_name = os.environ.get('l3VlanNetworkName1') vm = test_stub.create_vlan_vm(l3_name=l3_1_name) #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name) #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0] #vr_uuid = vr.uuid host = test_lib.lib_get_vm_host(vm.get_vm()) host_uuid = host.uuid test_obj_dict.add_vm(vm) vm.check() test_util.test_dsc('Add ISO Image') cond = res_ops.gen_query_conditions("status", '=', "Connected") bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid img_option = test_util.ImageOption() img_option.set_name('iso') img_option.set_backup_storage_uuid_list([bs_uuid]) command = "echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso" % os.environ.get( 'zstackInstallPath') test_lib.lib_execute_ssh_cmd(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], 'root', 'password', command) img_option.set_url( 'http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'])) image_inv = img_ops.add_iso_template(img_option) image = test_image.ZstackTestImage() image.set_image(image_inv) image.set_creation_option(img_option) test_obj_dict.add_image(image) #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()) #ps_uuid = ps.uuid #ps_ops.change_primary_storage_state(ps_uuid, 'maintain') test_stub.maintain_all_pss() if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90): test_util.test_fail( 'VM is expected to stop when PS change to maintain state') vm.set_state(vm_header.STOPPED) vm.check() test_util.test_dsc('Attach ISO to VM') cond = res_ops.gen_query_conditions('name', '=', 'iso') iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid img_ops.attach_iso(iso_uuid, vm.vm.uuid) #ps_ops.change_primary_storage_state(ps_uuid, 'enable') test_stub.enable_all_pss() host_ops.reconnect_host(host_uuid) test_stub.ensure_hosts_connected(120) #vm_ops.reconnect_vr(vr_uuid) vrs = test_lib.lib_get_all_vrs() for vr in vrs: vm_ops.start_vm(vr.uuid) vm.start() vm.check() vm.destroy() vm.check() #vm.expunge() #vm.check() test_util.test_pass('PS maintain mode Test Success')
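# A minimal sketch of the "add a fake ISO to the backup storage" step used in the
# PS maintain test above, wrapping the same ImageOption/img_ops calls. add_fake_iso
# is a hypothetical helper; test_util, img_ops and test_image are assumed to be
# imported as in the surrounding tests.
def add_fake_iso(name, url, bs_uuid):
    img_option = test_util.ImageOption()
    img_option.set_name(name)
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_url(url)
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    return image

# e.g. image = add_fake_iso('iso',
#                           'http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso'
#                           % os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], bs_uuid)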
def test(): global vm_inv test_util.test_dsc('Create test vm to test zstack upgrade by -u.') image_name = os.environ.get('imageName_i_c7_z_1.5') iso_path = os.environ.get('iso_path') iso_19_path = os.environ.get('iso_19_path') iso_10_path = os.environ.get('iso_10_path') iso_20_path = os.environ.get('iso_20_path') iso_21_path = os.environ.get('iso_21_path') iso_230_path = os.environ.get('iso_230_path') zstack_latest_version = os.environ.get('zstackLatestVersion') zstack_latest_path = os.environ.get('zstackLatestInstaller') vm_name = os.environ.get('vmName') upgrade_script_path = os.environ.get('upgradeScript') vm_inv = test_stub.create_vm_scenario(image_name, vm_name) vm_ip = vm_inv.vmNics[0].ip test_lib.lib_wait_target_up(vm_ip, 22) test_stub.make_ssh_no_password(vm_ip, tmp_file) test_util.test_dsc('Update MN IP') test_stub.update_mn_hostname(vm_ip, tmp_file) test_stub.update_mn_ip(vm_ip, tmp_file) test_stub.reset_rabbitmq(vm_ip, tmp_file) test_stub.start_mn(vm_ip, tmp_file) test_stub.check_installation(vm_ip, tmp_file) test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path) #pkg_num = 1.6 release_ver = [ '1.6', '1.7', '1.8', '1.9', '1.10', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.3.1' ] curren_num = float(os.environ.get('releasePkgNum')) #while pkg_num <= curren_num: for pkg_num in release_ver: test_util.test_logger('Upgrade zstack to %s' % pkg_num) #if str(pkg_num) == '1.7': # test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path) if str(pkg_num) == '1.10': test_stub.update_10_iso(vm_ip, tmp_file, iso_10_path, upgrade_script_path) if str(pkg_num) == '2.0.0': test_stub.update_20_iso(vm_ip, tmp_file, iso_20_path, upgrade_script_path) if str(pkg_num) == '2.1.0': test_stub.update_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path) if str(pkg_num) == '2.3.0': test_stub.update_230_iso(vm_ip, tmp_file, iso_230_path, upgrade_script_path) upgrade_pkg = os.environ.get('zstackPkg_%s' % pkg_num) test_stub.upgrade_zstack(vm_ip, upgrade_pkg, tmp_file) test_stub.start_mn(vm_ip, tmp_file) test_stub.check_zstack_version(vm_ip, tmp_file, str(pkg_num)) #test_stub.check_installation(vm_ip, tmp_file) #pkg_num = pkg_num + 0.1 test_util.test_dsc('Upgrade zstack to latest') test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path) test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file) test_stub.start_mn(vm_ip, tmp_file) test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version) test_stub.check_installation(vm_ip, tmp_file) os.system('rm -f %s' % tmp_file) test_stub.destroy_vm_scenario(vm_inv.uuid) test_util.test_pass('ZStack upgrade Test Success')
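# The release-by-release upgrade loop above special-cases a few versions that need a
# matching ISO update before the package upgrade. A sketch of expressing that as a
# dispatch table, reusing the test_stub.update_*_iso calls exactly as above
# (test_stub is assumed to be imported as in the surrounding tests):
def update_iso_for_version(pkg_num, vm_ip, tmp_file, upgrade_script_path, iso_paths):
    '''iso_paths maps a release string to the ISO path it needs, e.g.
    {'1.10': iso_10_path, '2.0.0': iso_20_path, '2.1.0': iso_21_path, '2.3.0': iso_230_path}.'''
    updaters = {
        '1.10': test_stub.update_10_iso,
        '2.0.0': test_stub.update_20_iso,
        '2.1.0': test_stub.update_21_iso,
        '2.3.0': test_stub.update_230_iso,
    }
    updater = updaters.get(str(pkg_num))
    if updater:
        updater(vm_ip, tmp_file, iso_paths[str(pkg_num)], upgrade_script_path)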
def test(): global route_table1_uuid global route_table2_uuid test_util.test_dsc('Check vm connection with vrouter route') vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1')) test_obj_dict.add_vm(vm1) vm1_ip = vm1.get_vm().vmNics[0].ip vr1 = test_lib.lib_find_flat_dhcp_vr_by_vm(vm1.vm)[0] vr1_uuid = vr1.uuid vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1) vr1_private_ip = test_lib.lib_find_vr_private_ip(vr1) l3network1_uuid = vm1.get_vm().vmNics[0].l3NetworkUuid cond = res_ops.gen_query_conditions('uuid', '=', l3network1_uuid) l3network1_cidr = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName3')) test_obj_dict.add_vm(vm2) vm2_ip = vm2.get_vm().vmNics[0].ip vr2 = test_lib.lib_find_flat_dhcp_vr_by_vm(vm2.vm)[0] vr2_uuid = vr2.uuid vr2_pub_ip = test_lib.lib_find_vr_pub_ip(vr2) vr2_private_ip = test_lib.lib_find_vr_private_ip(vr2) l3network2_uuid = vm2.get_vm().vmNics[0].l3NetworkUuid cond = res_ops.gen_query_conditions('uuid', '=', l3network2_uuid) l3network2_cidr = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr vm1.check() vm2.check() #Attach the network service to l3 network cond = res_ops.gen_query_conditions('type', '=', 'vrouter') service_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, cond)[0].uuid network_services_json = "{'%s':['VRouterRoute']}" % service_uuid net_ops.detach_network_service_from_l3network(l3network1_uuid, service_uuid) net_ops.attach_network_service_to_l3network(l3network1_uuid, service_uuid) net_ops.detach_network_service_from_l3network(l3network2_uuid, service_uuid) net_ops.attach_network_service_to_l3network(l3network2_uuid, service_uuid) #Create route table and route entry for each vrouter route_table1 = net_ops.create_vrouter_route_table('route_table1') route_table1_uuid = route_table1.uuid route_entry1 = net_ops.add_vrouter_route_entry(route_table1_uuid, l3network2_cidr, vr2_pub_ip) route_table2 = net_ops.create_vrouter_route_table('route_table2') route_table2_uuid = route_table2.uuid route_entry2 = net_ops.add_vrouter_route_entry(route_table2_uuid, l3network1_cidr, vr1_pub_ip) #Attach the route table to vrouter and check the network #net_ops.detach_vrouter_route_table_from_vrouter(route_table1_uuid, vr1_uuid) net_ops.attach_vrouter_route_table_to_vrouter(route_table1_uuid, vr1_uuid) #net_ops.detach_vrouter_route_table_from_vrouter(route_table2_uuid, vr2_uuid) net_ops.attach_vrouter_route_table_to_vrouter(route_table2_uuid, vr2_uuid) #Add vroute private ip to vm route cmd = 'ip r del default; ip r add default via %s' % vr1_private_ip rsp = test_lib.lib_execute_ssh_cmd(vm1_ip, 'root', 'password', cmd, 180) cmd = 'ip r del default; ip r add default via %s' % vr2_private_ip rsp = test_lib.lib_execute_ssh_cmd(vm2_ip, 'root', 'password', cmd, 180) if not test_lib.lib_check_ping(vm1.vm, vm2_ip, no_exception=True): test_util.test_fail( 'Exception: [vm:] %s ping [vr:] %s fail. But it should ping successfully.' % (vm1.vm.uuid, vm2_ip)) if not test_lib.lib_check_ping(vm1.vm, vm1_ip, no_exception=True): test_util.test_fail( 'Exception: [vm:] %s ping [vr:] %s fail. But it should ping successfully.' 
% (vm2.vm.uuid, vm1_ip)) #Detach the route table from vrouter and check the network net_ops.detach_vrouter_route_table_from_vrouter(route_table1_uuid, vr1_uuid) net_ops.detach_vrouter_route_table_from_vrouter(route_table2_uuid, vr2_uuid) if test_lib.lib_check_ping(vm1.vm, vm2_ip, no_exception=True): test_util.test_fail( 'Exception: [vm:] %s ping [vr:] %s successfully. But it should fail because the route table is detached.' % (vm1.vm.uuid, vm2_ip)) if test_lib.lib_check_ping(vm2.vm, vm1_ip, no_exception=True): test_util.test_fail( 'Exception: [vm:] %s ping [vr:] %s successfully. But it should fail because the route table is detached.' % (vm2.vm.uuid, vm1_ip)) #Delete route entry and table, and destroy vm net_ops.delete_vrouter_route_entry(route_entry1.uuid, route_table1_uuid) net_ops.delete_vrouter_route_entry(route_entry2.uuid, route_table2_uuid) net_ops.delete_vrouter_route_table(route_table1_uuid) net_ops.delete_vrouter_route_table(route_table2_uuid) vm1.destroy() vm2.destroy() test_util.test_pass('Check VRouter route Success')
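# The route test above repeats the same "ping must work / ping must fail" check in
# both directions before and after detaching the route tables. A small sketch of
# that assertion, reusing test_lib.lib_check_ping and test_util.test_fail as called
# above (both assumed imported as in the surrounding tests):
def assert_ping(src_vm, dst_ip, expect_reachable):
    reachable = test_lib.lib_check_ping(src_vm, dst_ip, no_exception=True)
    if reachable and not expect_reachable:
        test_util.test_fail('[vm:] %s can still ping %s, but the route table is detached.' % (src_vm.uuid, dst_ip))
    if not reachable and expect_reachable:
        test_util.test_fail('[vm:] %s cannot ping %s, but a route to it is attached.' % (src_vm.uuid, dst_ip))

# e.g. assert_ping(vm1.vm, vm2_ip, expect_reachable=True)   # route tables attached
#      assert_ping(vm1.vm, vm2_ip, expect_reachable=False)  # after detach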
def test(): test_stub = test_lib.lib_get_test_stub() test_obj_dict1 = test_state.TestStateDict() test_obj_dict2 = test_state.TestStateDict() global mevoco1_ip global mevoco2_ip global ipsec1 global ipsec2 global templateContent mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] mevoco2_ip = os.environ['secondZStackMnIp'] test_util.test_dsc('Create test vip in mevoco1') cond = res_ops.gen_query_conditions("category", '=', "Public") l3_pub1_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond) cond = res_ops.gen_query_conditions("name", '=', os.environ.get('l3VlanNetworkName1')) l3_pri1_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond) vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1')) vip1 = test_stub.create_vip('ipsec1_vip', l3_pub1_queried[0].uuid) cond = res_ops.gen_query_conditions('uuid', '=', l3_pri1_queried[0].uuid) first_zstack_cidrs = res_ops.query_resource( res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Create test vip in mevoco2') cond = res_ops.gen_query_conditions("category", '=', "Public") l3_pub2_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond) cond = res_ops.gen_query_conditions( "name", '=', os.environ.get('l3VlanDNATNetworkName')) l3_pri2_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond) vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName')) vip2 = test_stub.create_vip('ipsec2_vip', l3_pub2_queried[0].uuid) cond = res_ops.gen_query_conditions('uuid', '=', l3_pri2_queried[0].uuid) second_zstack_cidrs = res_ops.query_resource( res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr templateContent = ''' { "ZStackTemplateFormatVersion": "2018-06-18", "Description": "本示例会创建一个简单的IPsec通道,需要用户提供下面正确的数据\n已有的虚拟IP地址,\n本地子网Uuid,远端IP,远端CIDR,认证密钥", "Parameters": { "VipUuid":{ "Type": "String", "Label": "虚拟IP", "Description":"已有的虚拟IP的Uuid" }, "PrivateNetworkUuid":{ "Type": "String", "Label": "本地网络", "Description":"本地网络Uuid" }, "PeerAddress": { "Type": "String", "Description":"远端IP" }, "PeerCidrs":{ "Type": "CommaDelimitedList", "Description":"远端 Cidr" }, "AuthKey":{ "Type": "String", "DefaultValue":"Test1234" } }, "Resources": { "IPsecConnection":{ "Type": "ZStack::Resource::IPsecConnection", "Properties": { "name": "IPsec-STACK", "vipUuid": {"Ref": "VipUuid"}, "l3NetworkUuid": {"Ref":"PrivateNetworkUuid"}, "peerAddress": {"Ref":"PeerAddress"}, "peerCidrs": {"Ref":"PeerCidrs"}, "authKey": {"Ref":"AuthKey"} } } }, "Outputs": { "IPsecConnection": { "Value": { "Ref": "IPsecConnection" } } } } ''' #1.create resource stack os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Create ipsec in mevoco1') resource_stack1_option = test_util.ResourceStackOption() resource_stack1_option.set_name("Create_STACK-IPSEC1") resource_stack1_option.set_rollback("true") print('aooo = %s is %s') % ([second_zstack_cidrs ], type([second_zstack_cidrs])) parameter1 = '{"VipUuid":"%s","PrivateNetworkUuid":"%s","PeerAddress":"%s","PeerCidrs":"%s"}' % ( vip1.get_vip().uuid, l3_pri1_queried[0].uuid, vip2.get_vip().ip, second_zstack_cidrs) resource_stack1_option.set_templateContent(templateContent) resource_stack1_option.set_parameters(parameter1) preview_resource_stack1 = resource_stack_ops.preview_resource_stack( resource_stack1_option) resource_stack1 = resource_stack_ops.create_resource_stack( resource_stack1_option) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Create ipsec in mevoco2') 
resource_stack2_option = test_util.ResourceStackOption() resource_stack2_option.set_name("Create_STACK-IPSEC2") resource_stack2_option.set_rollback("true") parameter2 = '{"VipUuid":"%s","PrivateNetworkUuid":"%s","PeerAddress":"%s","PeerCidrs":"%s"}' % ( vip2.get_vip().uuid, l3_pri2_queried[0].uuid, vip1.get_vip().ip, first_zstack_cidrs) resource_stack2_option.set_templateContent(templateContent) resource_stack2_option.set_parameters(parameter2) preview_resource_stack2 = resource_stack_ops.preview_resource_stack( resource_stack2_option) resource_stack2 = resource_stack_ops.create_resource_stack( resource_stack2_option) #2.query resource stack os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Query resource stack in mevoco1') cond = res_ops.gen_query_conditions('uuid', '=', resource_stack1.uuid) resource_stack1_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond) cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK') ipsec1_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond) if len(resource_stack1_queried) == 0: test_util.test_fail("Fail to query resource stack") if resource_stack1_queried[0].status == 'Created': if len(ipsec1_queried) == 0: test_util.test_fail( "Fail to create ipsec connection when resource stack status is Created" ) os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Query resource stack in mevoco2') cond = res_ops.gen_query_conditions('uuid', '=', resource_stack2.uuid) resource_stack2_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond) cond = res_ops.gen_query_conditions('name', '=', 'IPsec-STACK') ipsec2_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond) if len(resource_stack2_queried) == 0: test_util.test_fail("Fail to query resource stack") if resource_stack2_queried[0].status == 'Created': if len(ipsec2_queried) == 0: test_util.test_fail( "Fail to create ipsec connection when resource stack status is Created" ) #3.get resource from resource stack os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Get resource from resource stack in mevoco1') resource1 = resource_stack_ops.get_resource_from_resource_stack( resource_stack1.uuid) if resource1 == None or len(resource1) != 1: test_util.test_fail("Fail to get resource from resource_stack") os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Get resource from resource stack in mevoco2') resource2 = resource_stack_ops.get_resource_from_resource_stack( resource_stack2.uuid) if resource2 == None or len(resource1) != 1: test_util.test_fail("Fail to get resource from resource_stack") #4.query event from resource stack os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Get resource from resource stack in mevoco1') cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack1.uuid) event1 = res_ops.query_event_from_resource_stack(cond) if event1 == None or len(event1) != 2: test_util.test_fail("Fail to get event from resource_stack") os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Get resource from resource stack in mevoco2') cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack2.uuid) event2 = res_ops.query_event_from_resource_stack(cond) if event2 == None or len(event2) != 2: test_util.test_fail("Fail to get event from resource_stack") #5.delete resource stack os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip test_util.test_dsc('Delete resource stack in mevoco1') 
resource_stack_ops.delete_resource_stack(resource_stack1.uuid) cond = res_ops.gen_query_conditions('uuid', '=', resource_stack1.uuid) resource_stack1_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond) cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-IPSEC1') ipsec1_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond) if len(resource_stack1_queried) != 0: test_util.test_fail("Fail to delete resource stack") elif len(ipsec1_queried) != 0: test_util.test_fail( "Fail to delete ipsec connection when resource stack is deleted") os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip test_util.test_dsc('Delete resource stack in mevoco2') resource_stack_ops.delete_resource_stack(resource_stack2.uuid) cond = res_ops.gen_query_conditions('uuid', '=', resource_stack2.uuid) resource_stack2_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond) cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-IPSEC2') ipsec2_queried = res_ops.query_resource(res_ops.IPSEC_CONNECTION, cond) if len(resource_stack2_queried) != 0: test_util.test_fail("Fail to delete resource stack") elif len(ipsec2_queried) != 0: test_util.test_fail( "Fail to delete ipsec connection when resource stack is deleted") test_util.test_pass('Create IPsec Resource Stack Test Success')
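# The resource-stack parameters in the test above are assembled by hand as a
# '%'-formatted JSON string, which makes list-typed parameters such as PeerCidrs
# (declared as CommaDelimitedList in the template) easy to get wrong. A sketch of
# building the same payload with json.dumps instead (pure stdlib; whether the API
# expects a comma-joined string for PeerCidrs is an assumption to verify):
import json

def build_ipsec_stack_parameters(vip_uuid, private_l3_uuid, peer_address, peer_cidrs):
    return json.dumps({
        'VipUuid': vip_uuid,
        'PrivateNetworkUuid': private_l3_uuid,
        'PeerAddress': peer_address,
        'PeerCidrs': ','.join(peer_cidrs),
    })

# e.g. parameter1 = build_ipsec_stack_parameters(vip1.get_vip().uuid, l3_pri1_queried[0].uuid,
#                                                vip2.get_vip().ip, [second_zstack_cidrs])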
def test(): global vm_inv global zone_inv global cluster_inv global host_inv test_util.test_dsc( 'Create test vm to test zstack install MN on centos7.2 and add the HOST' ) conditions = res_ops.gen_query_conditions( 'name', '=', os.environ.get('imageNameBase_c72')) image = res_ops.query_resource(res_ops.IMAGE, conditions)[0] vm_inv = create_vm(image) time.sleep(100) iso_path = os.environ.get('iso_path') upgrade_script_path = os.environ.get('upgradeScript') test_util.test_dsc('Install zstack with -o') vm_ip = vm_inv.vmNics[0].ip test_stub.make_ssh_no_password(vm_ip, tmp_file) test_util.test_dsc('Upgrade master iso') test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path) target_file = '/root/zstack-all-in-one.tgz' test_stub.prepare_test_env(vm_inv, target_file) ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip args = "-o" test_util.test_dsc('start installing the latest zstack-MN') test_stub.execute_install_with_args(ssh_cmd, args, target_file, tmp_file) test_util.test_dsc('check add the sftp bs and delete the sftp bs') test_stub.check_installation(vm_ip, tmp_file) test_util.test_dsc('create zone name is zone1') zone_inv = test_stub.create_zone1(vm_ip, tmp_file) zone_uuid = zone_inv.uuid test_util.test_dsc('create cluster name is clsuter1') cluster_inv = test_stub.create_cluster1(vm_ip, zone_uuid, tmp_file) cluster_uuid = cluster_inv.uuid test_util.test_dsc('add host name is HOST1') host_inv = test_stub.add_kvm_host1(vm_ip, cluster_uuid, tmp_file) host_uuid = host_inv.uuid os.system('rm -f %s' % tmp_file) sce_ops.destroy_vm(zstack_management_ip, vm_inv.uuid) test_util.test_pass('Install ZStack with -o centos7.2 mini-iso Success')
def test(): global vm1, vm2, vip l3_vr_network = os.environ['l3vCenterNoVlanNetworkName'] image_name = os.environ['image_dhcp_name'] test_util.test_dsc('Create test vm with lb.') vm1 = test_stub.create_vm_in_vcenter(vm_name='test_vip_lb_1', image_name=image_name, l3_name=l3_vr_network) test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vm_in_vcenter(vm_name='test_vip_lb_2', image_name=image_name, l3_name=l3_vr_network) test_obj_dict.add_vm(vm2) time.sleep(50) vm_nic1 = vm1.get_vm().vmNics[0] vm_nic1_uuid = vm_nic1.uuid vm_nic1_ip = vm_nic1.ip vm_nic2 = vm2.get_vm().vmNics[0] vm_nic2_uuid = vm_nic2.uuid vm_nic2_ip = vm_nic2.ip test_stub.set_httpd_in_vm(vm1.get_vm(), vm_nic1_ip) test_stub.set_httpd_in_vm(vm2.get_vm(), vm_nic2_ip) pri_l3_uuid = vm_nic1.l3NetworkUuid vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0] vr_pub_ip = test_lib.lib_find_vr_pub_ip(vr) vip = zstack_vip_header.ZstackTestVip() vip.get_snat_ip_as_vip(vr_pub_ip) vip.isVcenter = True test_obj_dict.add_vip(vip) lb = zstack_lb_header.ZstackTestLoadBalancer() lb2 = zstack_lb_header.ZstackTestLoadBalancer() lb.create('create lb test', vip.get_vip().uuid) lb2.create('create lb2 test', vip.get_vip().uuid) lb.isVcenter = True lb2.isVcenter = True test_obj_dict.add_load_balancer(lb) test_obj_dict.add_load_balancer(lb2) vip.attach_lb(lb) vip.attach_lb(lb2) lb_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=222, lbi_port=22) lb2_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=2222, lbi_port=80) lbl = lb.create_listener(lb_creation_option) lbl2 = lb2.create_listener(lb2_creation_option) lbl.add_nics([vm_nic1_uuid, vm_nic2_uuid]) lbl2.add_nics([vm_nic1_uuid, vm_nic2_uuid]) vm1.check() vm2.check() lb.check() lb2.check() vip.check() lb.delete() lb2.delete() test_obj_dict.rm_vip(vip) test_obj_dict.rm_load_balancer(lb) test_obj_dict.rm_load_balancer(lb2) lb.check() lb2.check() test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('Create Load Balancer Test Success')
def test(): global res global original_rate test_util.test_dsc('Test storage over provision method') primary_storage_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE) for ps in primary_storage_list: # if ps.type == "SharedBlock": if ps.type in ["SharedBlock", "AliyunNAS"]: test_util.test_skip( 'SharedBlock primary storage does not support overProvision') test_lib.lib_skip_if_ps_num_is_not_eq_number(1) zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid cond = res_ops.gen_query_conditions('state', '=', 'Enabled') cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond) host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1) if not host: test_util.test_skip('No Enabled/Connected host was found, skip test.') return True ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1) if not ps: test_util.test_skip( 'No Enabled/Connected primary storage was found, skip test.') return True host = host[0] ps = ps[0] ps_type = ps.type #TODO: Fix ceph testing if ps_type == 'Ceph' or ps_type == 'SharedMountPoint': test_util.test_skip('skip test for ceph and smp.') over_provision_rate = 2.5 target_volume_num = 12 kept_disk_size = 10 * 1024 * 1024 vm = test_stub.create_vm(vm_name = 'storage_over_prs_vm_1', \ host_uuid = host.uuid) test_obj_dict.add_vm(vm) vm.check() avail_cap = get_storage_capacity(ps_type, host.uuid, ps.uuid) if avail_cap < kept_disk_size: test_util.test_skip( 'available disk capacity:%d is too small, skip test.' % avail_cap) return True res = sizeunit.get_size(test_lib.lib_get_reserved_primary_storage()) original_rate = test_lib.lib_set_provision_storage_rate( over_provision_rate) #data_volume_size = int(over_provision_rate * (avail_cap - kept_disk_size) / target_volume_num) data_volume_size = int(over_provision_rate * (avail_cap - res) / target_volume_num) #will change the rate back to check if available capacity is same with original one. This was a bug, that only happened when system create 1 vm. test_lib.lib_set_provision_storage_rate(original_rate) avail_cap_tmp = get_storage_capacity(ps_type, host.uuid, ps.uuid) if avail_cap != avail_cap_tmp: test_util.test_fail( 'disk size is not same, between 2 times provision. Before change over rate, 1st cap: %d; 2nd cap: %d' % (avail_cap, avail_cap_tmp)) test_lib.lib_set_provision_storage_rate(over_provision_rate) test_util.test_logger( 'Will create a serial of volume. Each of them will have %d size.' % data_volume_size) disk_offering_option = test_util.DiskOfferingOption() disk_offering_option.set_name('storage-over-ps-test') disk_offering_option.set_diskSize(data_volume_size) data_volume_offering = vol_ops.create_volume_offering(disk_offering_option) test_obj_dict.add_disk_offering(data_volume_offering) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(data_volume_offering.uuid) times = 1 while (times <= target_volume_num): try: volume_creation_option.set_name('volume-%d' % times) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) test_util.test_logger( 'Current available storage size: %d' % get_storage_capacity(ps_type, host.uuid, ps.uuid)) volume.attach(vm) except Exception as e: test_util.test_logger( "Unexpected volume Creation Failure in storage over provision test. 
" ) raise e times += 1 time.sleep(2) avail_cap2 = (get_storage_capacity(ps_type, host.uuid, ps.uuid) - res) if avail_cap2 > data_volume_size: test_util.test_fail( 'Available disk size: %d is still bigger than offering disk size: %d , after creating %d volumes.' % (avail_cap2, data_volume_size, target_volume_num)) try: volume_creation_option.set_name('volume-%d' % (times + 1)) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.attach(vm) except: test_util.test_logger( "Expected Volume Creation Failure in storage over provision test. " ) else: test_util.test_fail( "The %dth Volume is still attachable, which is wrong" % (target_volume_num + 1)) test_lib.lib_set_provision_storage_rate(original_rate) test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('Memory Over Provision Test Pass')
def test(): ps_env = test_stub.PSEnvChecker() local_ps = ps_env.get_random_local() nfs_ps = ps_env.get_random_nfs() test_util.test_dsc("Create {0} vm ".format(VM_COUNT)) vm = test_stub.create_multi_vms(name_prefix='test-', count=VM_COUNT)[0] vm.check() test_obj_dict.add_vm(vm) test_util.test_dsc("Create {0} volumes in NFS".format(VOLUME_NUMBER)) volume_in_nfs = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=nfs_ps) for volume in volume_in_nfs: test_obj_dict.add_volume(volume) volume.check() test_util.test_dsc("Attach all volumes to VM") for volume in volume_in_nfs: volume.attach(vm) volume.check() test_util.test_dsc("disable local PS") ps_ops.change_primary_storage_state(local_ps.uuid, state='disable') disabled_ps_list.append(local_ps) test_util.test_dsc("make sure all VM and Volumes still OK and running") vm.check() for volume in volume_in_nfs: volume.check() test_util.test_dsc("Try to create vm with datavolume") with test_stub.expected_failure( 'Create vm with datavol in nfs-local env when local disabled', Exception): test_stub.create_multi_vms(name_prefix='test-vm', count=1, datavolume=10) test_util.test_dsc("Try to create datavolume in NFS") volume = test_stub.create_multi_volumes(count=1, ps=nfs_ps)[0] test_obj_dict.add_volume(volume) test_util.test_pass('Multi PrimaryStorage Test Pass')
def test(): test_util.test_dsc('Create test vm with lb.') vm1 = test_stub.create_lb_vm() test_obj_dict.add_vm(vm1) vm2 = test_stub.create_lb_vm() test_obj_dict.add_vm(vm2) #l3_name = os.environ.get('l3VlanNetworkName1') #vr1 = test_stub.get_vr_by_private_l3_name(l3_name) #l3_name = os.environ.get('l3NoVlanNetworkName1') #vr2 = test_stub.get_vr_by_private_l3_name(l3_name) vm_nic1 = vm1.get_vm().vmNics[0] vm_nic1_uuid = vm_nic1.uuid vm_nic1_ip = vm_nic1.ip vm_nic2 = vm2.get_vm().vmNics[0] vm_nic2_uuid = vm_nic2.uuid vm_nic2_ip = vm_nic2.ip vm1.check() vm2.check() #test_lib.lib_wait_target_up(vm_nic1_ip, "root", 120) #test_lib.lib_wait_target_up(vm_nic2_ip, "root", 120) test_stub.set_httpd_in_vm(vm_nic1_ip, "root", "password") test_stub.set_httpd_in_vm(vm_nic2_ip, "root", "password") pri_l3_uuid = vm_nic1.l3NetworkUuid vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0] vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr) l3_uuid = vr_pub_nic.l3NetworkUuid vip = test_stub.create_vip('vip_for_lb_test', l3_uuid) test_obj_dict.add_vip(vip) lb = zstack_lb_header.ZstackTestLoadBalancer() lb2 = zstack_lb_header.ZstackTestLoadBalancer() lb.create('create lb test', vip.get_vip().uuid) lb2.create('create lb2 test', vip.get_vip().uuid) test_obj_dict.add_load_balancer(lb) test_obj_dict.add_load_balancer(lb2) vip.attach_lb(lb) vip.attach_lb(lb2) lb_creation_option = test_lib.lib_create_lb_listener_option() lb2_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=2222, lbi_port=80) lbl = lb.create_listener(lb_creation_option) lbl2 = lb2.create_listener(lb2_creation_option) lbl.add_nics([vm_nic1_uuid, vm_nic2_uuid]) lbl2.add_nics([vm_nic1_uuid, vm_nic2_uuid]) vm1.check() vm2.check() lb.check() lb2.check() vip.check() lb.delete() lb2.delete() vip.delete() test_obj_dict.rm_vip(vip) test_obj_dict.rm_load_balancer(lb) test_obj_dict.rm_load_balancer(lb2) lb.check() lb2.check() test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('Create Load Balancer Test Success')
def test():
    global original_rate
    global new_offering_uuid
    test_util.test_dsc('Test memory allocation and reclaiming.')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    host = host[0]
    over_provision_rate = 1
    target_vm_num = 5

    host_res = test_lib.lib_get_cpu_memory_capacity(host_uuids=[host.uuid])
    #avail_mem = host_res.availableMemory * over_provision_rate
    avail_mem = host_res.availableMemory
    if avail_mem <= 1024 * 1024 * 1024:
        test_util.test_skip('Available memory is less than 1024MB, skip test.')
        return True

    original_rate = test_lib.lib_set_provision_memory_rate(over_provision_rate)
    host_res = test_lib.lib_get_cpu_memory_capacity(host_uuids=[host.uuid])
    avail_mem = host_res.availableMemory
    test_mem = avail_mem / target_vm_num
    new_offering_mem = test_mem
    new_offering = test_lib.lib_create_instance_offering(
        memorySize=new_offering_mem)
    new_offering_uuid = new_offering.uuid

    rounds = 0
    while (rounds < 3):
        times = 1
        while (times <= (target_vm_num + 3)):
            try:
                vm = test_stub.create_vm(vm_name='mem_reclaim_vm_%d' % times,
                                         host_uuid=host.uuid,
                                         instance_offering_uuid=new_offering.uuid)
                test_obj_dict.add_vm(vm)
            except Exception as e:
                test_util.test_logger(
                    "VM Creation Failure in memory reclaiming test. :%s " % e)
            times += 1

        for vm in test_obj_dict.get_all_vm_list():
            try:
                vm.destroy()
                test_obj_dict.rm_vm(vm)
            except Exception as e:
                test_util.test_logger(
                    "VM Destroying Failure in memory reclaiming test. :%s " % e)

        host_res2 = test_lib.lib_get_cpu_memory_capacity(host_uuids=[host.uuid])
        avail_mem2 = host_res2.availableMemory
        if avail_mem2 != avail_mem:
            test_util.test_fail(
                'Available memory reclaiming is not correct. Current available memory: %d, original available memory: %d, after creating and destroying %d vms in round: %d'
                % (avail_mem2, avail_mem, target_vm_num, rounds))
        rounds += 1

    test_lib.lib_set_provision_memory_rate(original_rate)
    vm_ops.delete_instance_offering(new_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Memory Reclaiming Test Pass')
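# Worked numbers for the sizing above, under the assumption that the allocator
# subtracts exactly the offering size per created VM: with rate 1 and e.g.
# 10 GB available, the offering is 2 GB (10 GB / 5), so only 5 of the 8
# attempted creations can succeed, and destroying them must restore
# availableMemory to the original 10 GB.  The values are illustrative only.
avail = 10 * 1024 ** 3
offering = avail // 5                      # 2 GB, matches test_mem above
fits = avail // offering                   # 5 VMs fit; attempts 6-8 should fail
assert fits == 5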
def test():
    global ps_inv
    global ps_uuid
    global cluster_uuid
    global tag
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid

    #pick up primary storage 1 and set system tag for instance offering.
    ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name1)[0]
    ps_uuid = ps_inv.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_ps_ops')

    tag = tag_ops.create_system_tag('InstanceOfferingVO',
                                    instance_offering_uuid,
                                    'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)
    cluster_uuid = vm1.get_vm().clusterUuid

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm1.check()
    vm1.start()

    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    test_util.test_dsc("Delete Primary Storage")
    tag_ops.delete_tag(tag.uuid)
    ps_ops.delete_primary_storage(ps_inv.uuid)
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.DESTROYED)
    vm1.set_state(vm_header.DESTROYED)
    vm1.check()
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.DESTROYED)
    vm2.set_state(vm_header.DESTROYED)
    vm2.check()

    try:
        vm3 = test_lib.lib_create_vm(vm_creation_option)
    except:
        test_util.test_logger('Catch expected vm creation exception, since primary storage has been deleted.')
    else:
        test_util.test_fail('Fail: Primary Storage has been deleted. But vm is still created with it.')

    recover_ps()
    test_util.test_dsc("Attach Primary Storage")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test primary storage operations Success')
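# The system tag above is what pins every VM created from that instance
# offering to one primary storage.  The small helper below only makes that
# intent explicit: the tag format string and the create_system_tag() call are
# taken from the test itself, while the helper name and constant are
# illustrative additions, not part of tag_ops.
PS_ALLOCATOR_TAG_FMT = 'primaryStorage::allocator::uuid::%s'

def pin_offering_to_ps(instance_offering_uuid, ps_uuid):
    # Attach the allocator hint to the InstanceOfferingVO resource, exactly
    # as done above; delete the returned tag to remove the restriction.
    return tag_ops.create_system_tag('InstanceOfferingVO',
                                     instance_offering_uuid,
                                     PS_ALLOCATOR_TAG_FMT % ps_uuid)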
def test():
    global curr_deploy_conf
    global l2_name2
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid

    #pick up l3
    l3_1 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name1)[0]
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    l2_2 = res_ops.get_resource(res_ops.L2_NETWORK,
                                uuid=l3_2.l2NetworkUuid)[0]
    l2_name2 = l2_2.name

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])

    cluster1_name = os.environ.get('clusterName2')
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    test_util.test_dsc('Delete l2_2')
    net_ops.delete_l2(l2_2.uuid)

    #Since 0.8, deleting an L3 won't delete the VM; it just detaches the L3 nic.
    #test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    #vm1.set_state(vm_header.STOPPED)
    vm1.check()
    #test_util.test_dsc('start vm again. vm should remove the deleted l2')
    #vm1.start()

    #adding the l2 resource will also add its l3 resource
    net_ops.add_l2_resource(curr_deploy_conf, l2_name=l2_name2)
    #re-query l3_2, since it was re-added and has a new uuid.
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    #check vm1 and vm2 status.
    vm1.check()
    if not len(vm1.get_vm().vmNics) == 1:
        test_util.test_fail(
            'vm1 vmNics still include L3: %s, even though it was deleted'
            % l3_2.uuid)
    vm2.check()

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete L2 Test Success')