def test():
    """Attach three ISOs to a windows VM, then detach them one by one."""
    # Skip on CentOS 7.4 with Ceph PS: hotplugging an ISO hits a libvirt bug there.
    release_check_cmd = "cat /etc/redhat-release | grep '7.4'"
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    rsp = test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password', release_check_cmd, 180)
    if rsp != False:
        for storage in res_ops.query_resource(res_ops.PRIMARY_STORAGE):
            if storage.type == 'Ceph':
                test_util.test_skip('cannot hotplug iso to the vm in ceph,it is a libvirt bug:https://bugzilla.redhat.com/show_bug.cgi?id=1541702.')

    multi_iso.add_iso_image()
    multi_iso.create_windows_vm()
    test_obj_dict.add_vm(multi_iso.vm1)

    multi_iso.get_all_iso_uuids()
    # Attach all three ISOs, then confirm the guest sees three cdroms.
    for iso_index in (0, 1, 2):
        multi_iso.attach_iso(multi_iso.iso_uuids[iso_index])
    multi_iso.check_windows_vm_cdrom(3)

    # Detach in mixed order (1, 0, 2), verifying the cdrom count after each step.
    for iso_index, remaining in ((1, 2), (0, 1), (2, 0)):
        multi_iso.detach_iso(multi_iso.iso_uuids[iso_index])
        multi_iso.check_windows_vm_cdrom(remaining)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Attach 3 ISO Test Success')
    def check(self):
        """Verify the virtual id exists and its role statements cover every
        action recorded on the test object; judge accordingly."""
        import json
        import zstacklib.utils.jsonobject as jsonobject
        super(zstack_vid_policy_db_checker, self).check()
        vid_uuid = self.test_obj.get_vid().uuid
        try:
            cond = res_ops.gen_query_conditions('uuid', '=', vid_uuid)
            vid = res_ops.query_resource(res_ops.IAM2_VIRTUAL_ID, cond)[0]
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            test_util.test_logger('Check result: [vid Inventory uuid:] %s does not exist in database.' % vid_uuid)
            return self.judge(False)

        cond = res_ops.gen_query_conditions('virtualIDs.uuid', '=', vid_uuid)
        role_statements = res_ops.query_resource(res_ops.ROLE, cond)[0].statements
        # Every expected action must appear in at least one role statement.
        for expected_action in self.test_obj.get_vid_statements()[0]['actions']:
            found = any(
                expected_action in json.loads(jsonobject.dumps(statement))['statement']['actions']
                for statement in role_statements)
            if not found:
                test_util.test_logger('Check result: [vid Inventory statement:] does not exist in database.')
                return self.judge(False)
        return self.judge(True)
def add_image_config(root_xml, original_images_setting, session_uuid = None):
    """Append an <images> section describing every Enabled image to root_xml.

    root_xml: parent XML element the <images> node is attached to.
    original_images_setting: previous image settings (xmlobject) — used to
        carry over username/password for images whose url matches.
    session_uuid: optional session for the resource queries.
    """
    images_xml = etree.SubElement(root_xml, "images")
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    images = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)

    # Index previous image settings by url so credentials can be reused.
    pre_images = {}
    if original_images_setting:
        pre_images_list = \
                original_images_setting.get_child_node_as_list('image')
        for pre_image in pre_images_list:
            pre_images[pre_image.url_] = pre_image

    for image in images:
        image_xml = etree.SubElement(images_xml, "image")
        set_xml_item_attr(image_xml, 'name', image.name)
        set_xml_item_attr(image_xml, 'description', image.description)
        set_xml_item_attr(image_xml, 'url', image.url)
        set_xml_item_attr(image_xml, 'format', image.format)
        set_xml_item_attr(image_xml, 'mediaType', image.mediaType)
        set_xml_item_attr(image_xml, 'guestOsType', image.guestOsType)
        set_xml_item_attr(image_xml, 'hypervisorType', image.hypervisorType)
        set_xml_item_attr(image_xml, 'bits', image.bits)
        # `in` instead of dict.has_key(): has_key is deprecated and removed
        # in Python 3; `in` behaves identically here.
        if image.url in pre_images:
            set_xml_item_attr(image_xml, 'username', \
                    pre_images[image.url].username__)
            set_xml_item_attr(image_xml, 'password', \
                    pre_images[image.url].password__)
        for bs_ref in image.backupStorageRefs:
            # Use a distinct name for the queried inventory instead of
            # rebinding the loop variable (the original reassigned `bs`).
            cond = res_ops.gen_query_conditions('uuid', '=', \
                    bs_ref.backupStorageUuid)
            bs_inv = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond, \
                    session_uuid)[0]
            add_xml_item_value(image_xml, 'backupStorageRef', bs_inv.name)
    def check(self):
        """Check load-balancer traffic distribution for this listener.

        Collects the running VMs behind the listener's nics, resolves the
        VIP address, then dispatches to the algorithm-specific checker.
        Judges False when no backend VM is running.
        """
        super(zstack_kvm_lbl_checker, self).check()
        self.vm_nic_uuids = self.lbl.get_vm_nics_uuid()
        self.algorithm = self.lbl.get_algorithm()
        self.vm_list = []
        # nic ip -> hit counter, presumably filled by the do_*_check helpers.
        self.vm_ip_test_dict = {}

        # Only running VMs can serve traffic; ignore the rest.
        for vm_nic_uuid in self.vm_nic_uuids:
            vm = test_lib.lib_get_vm_by_nic(vm_nic_uuid)
            if vm.state == 'Running':
                nic_ip = test_lib.lib_get_nic_by_uuid(vm_nic_uuid).ip
                self.vm_ip_test_dict[nic_ip] = 0
                self.vm_list.append(vm)

        if not self.vm_list:
            test_util.test_logger('There is not living vm for load balancer test')
            return self.judge(False)

        # Resolve the VIP ip through the owning load balancer.
        cond = res_ops.gen_query_conditions('listeners.uuid', '=', self.lbl_uuid)
        vip_uuid = res_ops.query_resource(res_ops.LOAD_BALANCER, cond)[0].vipUuid
        cond = res_ops.gen_query_conditions('uuid', '=', vip_uuid)
        self.vip_ip = res_ops.query_resource(res_ops.VIP, cond)[0].ip

        # Single backend: run the single-server check and return without a
        # judge() call here — presumably do_so_check judges; TODO confirm.
        if not len(self.vm_list) > 1:
            self.do_so_check()
            return

        if self.algorithm == lb_header.LB_ALGORITHM_RR:
            self.do_rr_check()
        elif self.algorithm == lb_header.LB_ALGORITHM_LC:
            #self.do_lc_check()
            #If not consider long connection, leastconn is same as round robin.
            self.do_rr_check()
        elif self.algorithm == lb_header.LB_ALGORITHM_SO:
            self.do_so_check()
    def check(self):
        """Confirm the security groups recorded for self.nic_uuid exist in DB."""
        super(zstack_kvm_sg_db_exist_checker, self).check()
        sg_list = self.test_obj.get_sg_list_by_nic(self.nic_uuid)

        if not sg_list:
            # Test record says the nic has no SG; cross-check the database.
            cond = res_ops.gen_query_conditions('vmNicUuid', '=', self.nic_uuid)
            nic_sg = res_ops.query_resource(res_ops.VM_SECURITY_GROUP, cond)
            if nic_sg:
                test_util.test_warn('Check result: [Security Group] is found in database for [nic:] %s. It is not consistent with test_sg record.' % self.nic_uuid)
                return self.judge(True)
            test_util.test_logger('Check result: No [Security Group] is found in database for [nic:] %s.' % self.nic_uuid)
            return self.judge(False)

        # Every SG the test object tracks must exist in the database.
        for test_sg in sg_list:
            sg_uuid = test_sg.security_group.uuid
            try:
                cond = res_ops.gen_query_conditions('uuid', '=', sg_uuid)
                sg = res_ops.query_resource(res_ops.SECURITY_GROUP, cond)[0]
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                test_util.test_logger('Check result: [Security Group Inventory uuid:] %s does not exist in database.' % sg_uuid)
                return self.judge(False)

        test_util.test_logger('Check result: [SecurityGroup Inventory uuid:] %s exist in database.' % sg.uuid)
        return self.judge(True)
def prepare_host_with_different_cpu_scenario():
    """
    Prepare vms in hosts
    """
    global pre_vms
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    #l3_name = os.environ.get('l3PublicNetworkName')


    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    #instance_offering_uuid = new_offering.uuid
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)

    ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].uuid
    hosts = test_lib.lib_find_hosts_by_ps_uuid(ps_uuid)
    host_id = 0
    for host, max_vm_num in zip(hosts,[2,3,1,2]):
        host_id +=1
        for i in range(max_vm_num):
            print "host_id=%s; i=%s" %(host_id, i)
            vm_creation_option.set_name('pre-create-vm-%s-%s' %(host_id, i))
            vm = test_vm_header.ZstackTestVm()
            vm_creation_option.set_host_uuid(host.uuid)
            vm.set_creation_option(vm_creation_option)
            vm.create()
            pre_vms.append(vm)
def test():
    """PXE server lifecycle: clean up, create, stop/start, verify state, delete."""
    global pxe_uuid
    test_util.test_dsc('Check if another pxe server created')
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    # Remove any pre-existing pxe servers so the test starts clean.
    pxe_servers = res_ops.query_resource(res_ops.PXE_SERVER)
    if pxe_servers != None:
        for pxe in pxe_servers:
            baremetal_ops.delete_pxe(pxe.uuid)

    test_util.test_dsc('Create pxe server and stop/start it')
    [pxe_ip, interface] = test_stub.get_pxe_info()
    pxe_uuid = test_stub.create_pxe(dhcp_interface = interface, hostname = pxe_ip, zoneUuid = zone_uuid).uuid

    pxe = res_ops.query_resource(res_ops.PXE_SERVER)[0]
    if pxe == None:
        test_util.test_fail('Create PXE Server Failed')
    baremetal_ops.stop_pxe(pxe_uuid)
    pxe = res_ops.query_resource(res_ops.PXE_SERVER)[0]
    if pxe.state != "Disabled":
        # Bug fix: report the attribute we actually checked — the original
        # formatted `pxe.status` into a message labelled "PXE state".
        test_util.test_fail('Disable PXE Server Failed, PXE state: %s' % pxe.state)
    baremetal_ops.start_pxe(pxe_uuid)
    pxe = res_ops.query_resource(res_ops.PXE_SERVER)[0]
    if pxe.state != "Enabled":
        test_util.test_fail('Enable PXE Server Failed, PXE state: %s' % pxe.state)

    baremetal_ops.delete_pxe(pxe_uuid)
    test_util.test_pass('Test PXE Server Success')
def test():
    """PS maintain-mode test: vm stops on maintain, recovers on enable.

    Creates a vlan vm, adds an ISO image, puts the vm's primary storage
    into maintain state (which must stop the vm), attaches the ISO to the
    stopped vm, then re-enables the PS and verifies the vm can start,
    run and be destroyed again.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Serve a fake iso from the management node's tomcat so the add succeeds.
    os.system("echo fake iso for test only >  %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)


    # Maintain mode on the PS is expected to stop the vm within 90s
    # (detected by its ssh port going down).
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)


    # Recover: enable the PS, reconnect the host and restart all VRs.
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)

    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def path():
    """Build a robot-test path mixing SharedBlock and Ceph primary storage.

    Returns a dict with the initial formation and a path_list of actions
    that moves 'volume1' between a SharedBlock-backed vm and a Ceph-backed
    vm, clones and resizes along the way, and finally ps-migrates a vm
    created from an intermediate image. Returns an empty path_list when
    the environment lacks either SharedBlock or Ceph primary storage.
    """
    # Only enabled/connected storages take part in the scenario.
    cond = res_ops.gen_query_conditions('state', '=', "Enabled")
    cond = res_ops.gen_query_conditions('status', '=', "Connected", cond)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    cond_imagestore = res_ops.gen_query_conditions('type', '=', "ImageStoreBackupStorage", cond)
    cond_ceph = res_ops.gen_query_conditions('type', '=', "Ceph", cond)
    imagestore = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_imagestore)
    ceph_bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_ceph)
    san_ps = [ps.uuid for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_ps = [ps.uuid for ps in ps_inv if ps.type == 'Ceph']
    # Utility vm names follow the robot convention '<prefix>-<ps name>'.
    san_vms = ['utility_vm_for_robot_test' + '-' + ps.name for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_vms = ['utility_vm_for_robot_test' + '-' + ps.name for ps in ps_inv if ps.type == 'Ceph']

    if san_ps and ceph_ps:
        return dict(initial_formation="template3",
                    path_list=[[TestAction.create_volume, "volume1", "=ps_uuid::%s" % san_ps[0]],
                               [TestAction.attach_volume, san_vms[-1], "volume1"],
                               [TestAction.resize_volume, san_vms[-1], 5*1024*1024],
                               [TestAction.clone_vm, san_vms[-1], "vm2", "=full"],
                               [TestAction.detach_volume, "volume1"],
                               [TestAction.attach_volume, ceph_vms[0], "volume1"],
                               [TestAction.stop_vm, san_vms[-1]],
                               [TestAction.reinit_vm, san_vms[-1]],
                               [TestAction.start_vm, san_vms[-1]],
                               [TestAction.clone_vm, ceph_vms[0], "vm3"],
                               [TestAction.resize_volume, ceph_vms[0], 5*1024*1024],
                               [TestAction.detach_volume, "volume1"],
                               [TestAction.create_image_from_volume, ceph_vms[0], 'image_created_from_%s' % ceph_vms[0], "=bs_uuid::%s" % imagestore[0].uuid],
                               [TestAction.create_vm_by_image, 'image_created_from_%s' % ceph_vms[0], 'qcow2', 'vm4', '=ps_uuid::%s' % random.choice(san_ps)],
                               [TestAction.stop_vm, 'vm4'],
                               [TestAction.ps_migrage_vm, 'vm4'],
                               [TestAction.attach_volume, 'vm4', "volume1"]])
    else:
        return dict(initial_formation="template3", path_list=[])
def test():
    """Full-clone a vm and verify the clone has exactly one volume.

    Requires an imagestore backup storage; skips on SharedBlock/AliyunNAS
    primary storage.
    """
    global test_obj_dict, bs, ps
    # Require an imagestore backup storage; skip otherwise (for-else: the
    # else runs only when no imagestore BS was found).
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            # Bug fix: this break was indented with a tab mixed into spaces,
            # which is rejected by Python 3 and `python -tt`.
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')

    # SharedBlock / AliyunNAS primary storage is not supported by this case.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and PS')

    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)

    new_vm = vm.clone(['test_vm_clone_with_on_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)

    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 1:
        test_util.test_fail('Did not find 1 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % new_vm.vm.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """Reconnecting an imagestore BS must fail under the community license.

    Loads a paid license, creates a zone plus an imagestore backup storage,
    reverts to the default license, then expects reconnect to fail with a
    'commercial license required' error.
    """
    # Note: the original listed bs_username twice in this global statement.
    global bs_username, bs_hostname, bs_password, bs_name, bs_url, bs_sshport
    global new_image

    # Install a one-day prepaid license and verify it took effect.
    file_path = test_stub.gen_license('woodpecker', '*****@*****.**', '1', 'Prepaid', '1', '')
    test_stub.load_license(file_path)
    issued_date = test_stub.get_license_info().issuedDate
    expired_date = test_stub.license_date_cal(issued_date, 86400 * 1)
    test_stub.check_license("*****@*****.**", 1, None, False, 'Paid', issued_date=issued_date, expired_date=expired_date)

    test_util.test_logger('create zone and add the bs of the imagestore')
    node_uuid = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].uuid
    test_stub.create_zone()
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid

    bs_name = 'BS1'
    bs_hostname = os.environ.get('node1Ip')
    bs_username = os.environ.get('nodeUserName')
    bs_password = os.environ.get('nodePassword')
    bs_url = '/zstack_bs'
    bs_sshport = '22'
    test_stub.create_image_store_backup_storage(bs_name, bs_hostname, bs_username, bs_password, bs_url, bs_sshport)
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid

    # Fall back to the default (community) license; imagestore needs commercial.
    test_stub.reload_default_license()
    test_util.test_logger('Check default community license')

    try:
        bs_ops.reconnect_backup_storage(bs_uuid)
    except Exception as e:
        # `except ... as` replaces the Python-2-only comma form, matching the
        # style used by the rest of this file.
        if "commercial" in str(e):
            test_util.test_pass('test reconnect bs failed, An operation failed, details: commercial license is required to use ImageStore.')
def test():
    """Create a data volume, resize it to 5G and verify the reported size."""
    global test_obj_dict
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0]
    test_util.test_dsc('Create volume and check')
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    creation_opt = test_util.VolumeOption()
    creation_opt.set_disk_offering_uuid(offering.uuid)
    creation_opt.set_primary_storage_uuid(ps.uuid)
    if ps.type == "LocalStorage":
        # Local-storage volumes must be pinned to a host at creation time.
        host = test_lib.lib_find_random_host()
        creation_opt.set_system_tags(["localStorage::hostUuid::%s" % host.uuid])
    volume = test_stub.create_volume(creation_opt)
    test_obj_dict.add_volume(volume)
    volume.check()

    target_size = 1024 * 1024 * 1024 * 5
    vol_ops.resize_data_volume(volume.volume.uuid, target_size)
    cond = res_ops.gen_query_conditions('type', '=', "Data")
    cond = res_ops.gen_query_conditions('status', '=', "Ready", cond)
    actual_size = res_ops.query_resource(res_ops.VOLUME, cond)[0].size
    if target_size != actual_size:
        test_util.test_fail('Resize Data Volume failed, size = %s' % actual_size)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume Test Success')
def test():
    """Setting NeverStop HA on a stopped vm should bring it back to Running."""
    global agent_url
    global vm
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore == None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    # Bug fix: ps_type was referenced in the skip message below without ever
    # being defined, raising NameError instead of skipping cleanly.
    ps_type = 'SharedMountPoint'
    cond = res_ops.gen_query_conditions('type', '=', ps_type)
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)

    if len(pss) == 0:
        test_util.test_skip('Required %s ps to test' % (ps_type))
    ps_uuid = pss[0].uuid
    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    vm.stop()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    # Poll up to 5 seconds for HA to restart the vm.
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except:
            test_util.test_logger('Retry until VM change to running')

    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_pass('set HA after stopped VM test pass')

    test_util.test_fail('set HA after stopped VM test fail')
# Example #14
# 0
def umount_all_primary_storages_violently():
    """Force-umount every enabled PS mount path on every KVM host, in parallel."""
    session_uuid = acc_ops.login_as_admin()
    for zone in res_ops.query_resource(res_ops.ZONE):
        ps_conds = res_ops.gen_query_conditions('zoneUuid', '=', zone.uuid)
        ps_conds = res_ops.gen_query_conditions('state', '=', 'Enabled', ps_conds)
        pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, ps_conds, session_uuid)

        host_conds = res_ops.gen_query_conditions('zoneUuid', '=', zone.uuid)
        host_conds = res_ops.gen_query_conditions('state', '=', host_header.ENABLED, host_conds)
        host_conds = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, host_conds)
        host_conds = res_ops.gen_query_conditions('hypervisorType', '=', inventory.KVM_HYPERVISOR_TYPE, host_conds)
        all_hosts = res_ops.query_resource(res_ops.HOST, host_conds, session_uuid)

        # Launch one umount worker per (host, primary storage) pair.
        for host in all_hosts:
            for ps in pss:
                worker = threading.Thread(
                        target=umount_primary_storage_violently,
                        args=(host.managementIp, ps.mountPath))
                worker.start()

    # Wait until only the main thread remains, i.e. all workers finished.
    while threading.active_count() > 1:
        time.sleep(0.1)

    acc_ops.logout(session_uuid)
    delete_ps_ceph_pools()
def test():
    """Put a vm's host into maintenance, then recover and restart the vm."""
    global host
    if test_lib.lib_get_active_host_number() < 2:
        test_util.test_fail('Not available host to do maintenance, since there are not 2 hosts')

    cond = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    image_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    l3_network_uuid = res_ops.query_resource(res_ops.L3_NETWORK)[0].uuid
    # Mini vm: 1 cpu, 1G (1073741824 bytes) memory.
    vm = test_stub.create_mini_vm([l3_network_uuid], image_uuid, cpu_num = 1, memory_size = 1073741824)
    test_obj_dict.add_vm(vm)

    host_uuid = test_lib.lib_get_vm_host(vm.vm).uuid
    host_ops.change_host_state(host_uuid, 'maintain')

    # Maintenance mode stops the vm; refresh its inventory and expected state.
    vm.update()
    vm.set_state(vm_header.STOPPED)

    vm.check()

    host_ops.change_host_state(host_uuid, 'enable')
    if not linux.wait_callback_success(is_host_connected, host_uuid, 120):
        test_util.test_fail('host status is not changed to connected, after changing its state to Enable')

    vm.start()
    vm.check()
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    test_util.test_pass('Maintain Host Test Success')
def test():
    test_util.test_dsc("check all hosts chrony status")
    host_uuid_list = []
    host_ip_list = []
    host_port_list = []
    hosts = {}

    for host_id in range(len(res_ops.query_resource(res_ops.HOST))):
        managementIp = res_ops.query_resource(res_ops.HOST)[host_id].managementIp
        sshPort = res_ops.query_resource(res_ops.HOST)[host_id].sshPort
        uuid = res_ops.query_resource(res_ops.HOST)[host_id].uuid
        host_ip_list.append(managementIp)
        host_port_list.append(sshPort)
        host_uuid_list.append(uuid)
    hosts = dict(zip(host_ip_list, host_port_list))
    print "hosts is %s" %(hosts)
    for k, v in hosts.items():
        check_chrony_status(k, v)
    for host_uuid in host_uuid_list:
        host_ops.reconnect_host(host_uuid)
        time.sleep(5)
    for k, v in hosts.items():
        check_chrony_status(k, v)

    host_ops.change_host_state(host_uuid_list[0], "disable")
    for k, v in hosts.items():
        check_chrony_status(k, v)
    host_ops.change_host_state(host_uuid_list[0], "enable")

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test chrony Success')
def compare(ps, vm, dvol, backup):
    """Compare a vm data volume on disk with its backup image on the BS.

    Pulls the backed-up image from the imagestore onto the vm's host via
    zstcli and runs `qemu-img compare` against the volume's current install
    path. Fails the test when the contents differ.
    """
    test_util.test_logger("-----------------compare----------------")
    host = test_lib.lib_find_host_by_vm(vm.vm)
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]

    cond = res_ops.gen_query_conditions("uuid", '=', dvol.volume.uuid)
    current_volume = res_ops.query_resource(res_ops.VOLUME, cond)[0]

    vol_path = current_volume.installPath
    if ps.type == "SharedBlock":
        # SharedBlock install paths carry <vg>/<lv> in segments 2 and 3;
        # translate to the device path /dev/<vg>/<lv>.
        path_parts = current_volume.installPath.split("/")
        vol_path = "/dev/" + path_parts[2] + "/" + path_parts[3]
    test_util.test_logger(vol_path)

    # Backup install path segments 2 and 3 hold the image name and id.
    install_parts = backup.backupStorageRefs[0].installPath.split("/")
    image_name = install_parts[2]
    # Renamed from `id`, which shadowed the builtin.
    image_id = install_parts[3]
    # Pull the backup onto the host, then byte-compare it with the volume.
    cmd = "mkdir /root/%s;" \
          "/usr/local/zstack/imagestore/bin/zstcli " \
          "-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
          "-url=%s:8000 " \
          "pull -installpath /root/%s/old.qcow2 %s:%s;" \
          "qemu-img compare %s /root/%s/old.qcow2;" % (image_id, bs.hostname, image_id, image_name, image_id, vol_path, image_id)
    result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
    if result != "Images are identical.\n":
        test_util.test_fail("compare vm_root_volume & image created by backup")
# Example #18
# 0
def add_storage_for_backup(deployConfig):
    print "try to add backup storage"
    if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'):
        print "find image store backup storage"
        for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage):
            if hasattr(bs, 'local_backup_storage_'):
                print "find local_backup_storage"
                cond = res_ops.gen_query_conditions('tag', '=', "allowbackup")
                tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
                if len(tags) > 0:
                    print "local backup storage already exists"
                    break
                cond = res_ops.gen_query_conditions('name', '=', bs.name_)
                bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
                print bss
                add_local_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid,'allowbackup')
    if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'):
        for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage):
            if hasattr(bs, 'remote_backup_storage_'):
                print "find remote_backup_storage"
                cond = res_ops.gen_query_conditions('tag', '=', "remotebackup")
                tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
                if len(tags) > 0:
                    print "remote backup storage already exists"
                    break
                cond = res_ops.gen_query_conditions('name', '=', bs.name_)
                bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
                print bss
                add_local_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid,'remotebackup')
# Example #19
# 0
 def get_bs(self):
     """Resolve the configured Ceph backup storage inventories by name."""
     for name_attr, bs_attr in (('ceph_bs_name', 'ceph_bs'), ('ceph_bs_name_2', 'ceph_bs_2')):
         bs_name = getattr(self, name_attr)
         if bs_name:
             cond = res_ops.gen_query_conditions('name', '=', bs_name)
             setattr(self, bs_attr, res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0])
def test():
    """Destroy all non-VR user vms and all VIPs, paging large result sets."""
    global session_to
    global session_mc
    global session_uuid
    session_uuid = acc_ops.login_as_admin()
    # Raise session limits so the mass destroy does not hit timeouts/limits.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    cond = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
    num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)

    if num <= thread_threshold:
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond, session_uuid)
        destroy_vms(vms)
    else:
        # Page through the vms to keep each query below thread_threshold.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vms = []
        while curr_num < num:
            vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE, \
                    cond, session_uuid, ['uuid'], start, limit)
            vms.extend(vms_temp)
            curr_num += limit
            start += limit
        destroy_vms(vms)

    vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)

    if vip_num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
        destroy_vips(vips)
    else:
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        # Bug fix: this accumulator was initialized as `vms = []`, so the
        # vips.extend() below raised NameError on the paged path.
        vips = []
        while curr_num < vip_num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP, \
                    [], session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
        destroy_vips(vips)

    left_num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    if left_num == 0:
        # NOTE(review): test_pass presumably ends the case here, which would
        # make the VIP verification below unreachable — confirm against
        # test_util before relying on it.
        test_util.test_pass('None VR VMs destroy Success. Destroy %d VMs.' % num)
    else:
        test_util.test_fail('None VR VMs destroy Fail. %d VMs are not Destroied.' % left_num)

    left_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if left_num == 0:
        test_util.test_pass('VIP destroy Success. Destroy %d VIP.' % num)
    else:
        test_util.test_fail('VIP destroy Fail. %d VIP are not Destroied.' % left_num)

    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
def test():
    """antiHard affinity group: migration within the group's hosts is constrained."""
    def _host_from_env(env_key):
        # Resolve a host inventory by the name stored in the environment.
        cond = res_ops.gen_query_conditions('name', '=', os.environ.get(env_key))
        return res_ops.query_resource(res_ops.HOST, cond)[0]

    host1 = _host_from_env("hostName")
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=host1.uuid)
    assert vm1.get_vm().hostUuid == host1.uuid
    test_obj_dict.add_vm(vm1)

    host2 = _host_from_env("hostName2")
    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=host2.uuid)
    assert vm2.get_vm().hostUuid == host2.uuid
    test_obj_dict.add_vm(vm2)

    # Migrating vm1 onto vm2's host violates antiHard and should fail.
    try:
        vm1.migrate(vm2.get_vm().hostUuid)
    except:
        test_util.test_logger("vm1 is not expected to migrate to host2 [uuid: %s]" % vm2.get_vm().hostUuid)

    host3 = _host_from_env("hostName3")

    # A free host is fine: bounce vm1 to host3 and back to host1.
    vm1.migrate(host3.uuid)
    vm1.migrate(host1.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """Create an ipsec connection over a vpc vrouter, then tear everything down."""
    global ipsec
    global vip1_uuid
    global vpc_vr
    cond = res_ops.gen_query_conditions('name', '=', 'public network')
    pub_l3 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vip1 = test_stub.create_vip('vip_ipsec', pub_l3.uuid)
    vip1_uuid = vip1.get_vip().uuid
    test_util.test_dsc('Create vpc vr and attach networks')
    vpc_vr = test_stub.create_vpc_vrouter()

    cond = res_ops.gen_query_conditions('name', '=', 'l3VlanNetwork11')
    guest_l3 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vpc_vr.add_nic(guest_l3.uuid)

    peer_address = '10.94.10.10'

    try:
        ipsec = ipsec_ops.create_ipsec_connection('ipsec', None, peer_address, '123456', vip1_uuid, None)
    except:
        test_util.test_fail('Failed to create vpc ipsec')

    test_stub.delete_vip(vip1_uuid)
    vpc_vr.destroy()
    ipsec_ops.delete_ipsec_connection(ipsec.uuid)
    test_util.test_pass('Create VPC Ipsec Success')
def test():
    """Verify the antiHard affinity-group placement policy.

    Two VMs in the same antiHard group are placed on two different hosts;
    creating a third VM of the group on an already-occupied host must fail.
    """
    h1_name = os.environ.get("hostName")
    cond = res_ops.gen_query_conditions('name', '=', h1_name)
    h1 = res_ops.query_resource(res_ops.HOST, cond)
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h1[0].uuid)
    assert vm1.get_vm().hostUuid == h1[0].uuid
    test_obj_dict.add_vm(vm1)

    h2_name = os.environ.get("hostName2")
    cond = res_ops.gen_query_conditions('name', '=', h2_name)
    h2 = res_ops.query_resource(res_ops.HOST, cond)
    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h2[0].uuid)
    assert vm2.get_vm().hostUuid == h2[0].uuid
    test_obj_dict.add_vm(vm2)

    # Negative case: vm3 targets host2, which already hosts vm2 from the
    # same antiHard group, so creation is expected to raise.
    # vm3 is initialized BEFORE the try so the finally clause can never
    # hit a NameError (originally it was the first statement inside try).
    vm3 = None
    try:
        vm3 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h2[0].uuid)
    except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt fatal
        test_util.test_logger("vm3 isn't created as expected")
    finally:
        if vm3:
            test_util.test_fail("Test Fail, vm3 [uuid:%s] is not expected to be created" % vm3.get_vm().uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """Robot-style VM operations loop: randomly drive state/ops transitions,
    periodically stop/reset, and after each round take a backup and compare
    the VM against the image created from the backup.

    Runs forever by design (robot test); every 10th round does a
    stop/reset/start sanity cycle.
    """
    global test_obj_dict, VM_RUNGGING_OPS, VM_STOPPED_OPS, VM_STATE_OPS, backup

    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    # Running VMs can migrate on any storage; local storage additionally
    # supports cold migration of stopped VMs.
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        VM_RUNGGING_OPS.append("VM_TEST_MIGRATE")
        VM_STOPPED_OPS.append("VM_TEST_MIGRATE")
    else:
        VM_RUNGGING_OPS.append("VM_TEST_MIGRATE")

    # Pick a non-system Linux root-volume template and a private L3 network.
    vm_name = "test_vm"
    cond = res_ops.gen_query_conditions("system", '=', "false")
    cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
    cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
    img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
    cond = res_ops.gen_query_conditions("category", '=', "Private")
    l3_name = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].name
    vm = test_stub.create_vm(vm_name, img_name, l3_name)

    i = 0
    while True:
        i += 1
        if i == 10:
            # Every 10th round: stop/reset cycle, then restart the VM.
            vm_op_test(vm, "VM_TEST_STOP")
            vm_op_test(vm, "VM_TEST_RESET")
            vm.start()
            time.sleep(60)
            vm.check()
            i = 0

        vm_op_test(vm, random.choice(VM_STATE_OPS))
        # BUG FIX: the original bound VM_OPS directly to the global op lists
        # and called VM_OPS.remove(...), mutating the globals; on the next
        # iteration with no backups remove() raised ValueError because the
        # item was already gone. Build filtered copies instead.
        VM_OPS = list(VM_STATE_OPS)
        if vm.state == "Running":
            VM_OPS = [op for op in VM_RUNGGING_OPS
                      if backup_list or op != "VM_TEST_BACKUP_IMAGE"]
        elif vm.state == "Stopped":
            VM_OPS = [op for op in VM_STOPPED_OPS
                      if backup_list or op not in ("VM_TEST_REVERT_BACKUP", "VM_TEST_BACKUP_IMAGE")]

        vm_op_test(vm, random.choice(VM_OPS))

        if vm.state == "Stopped":
            vm.start()

        if test_lib.lib_is_vm_l3_has_vr(vm.vm):
            test_lib.TestHarness = test_lib.TestHarnessVR
        time.sleep(60)
        # Drop a timestamped marker file in the guest so the backup image
        # contents can be compared against the live VM afterwards.
        cmd = "echo 111 > /root/" + str(int(time.time()))
        test_lib.lib_execute_command_in_vm(vm.vm, cmd)
        vm.suspend()
        # create_snapshot/backup
        vm_op_test(vm, "VM_TEST_BACKUP")
        # compare vm & image created by backup
        compare(ps, vm, backup)

        vm.resume()
def Create(vm_name_prefix):
    """Bulk-create VMs named '<vm_name_prefix>_<n>' on worker threads.

    The VM count comes from env var ZSTACK_TEST_NUM (default 1000).
    Session timeout/maxConcurrent are raised first so the many concurrent
    API calls do not expire or exhaust the session pool, and are restored
    at the end. Thread fan-out is capped by the module-level
    thread_threshold.
    """
    global session_uuid
    global session_to
    global session_mc

    # Reset the module-level session bookkeeping for this run.
    session_uuid = None
    session_to = None
    session_mc = None

    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
       vm_num = 1000
    else:
       vm_num = int(vm_num)

    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid

    cond = res_ops.gen_query_conditions('category', '=', 'Private')
    l3net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, cond, session_uuid)[0].uuid
    l3s = test_lib.lib_get_l3s()
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    # Change account session timeout/maxConcurrent; originals are kept so
    # they can be restored after the run.
    # NOTE(review): these calls run with session_uuid still None (before the
    # admin login below) — confirm the API accepts a None session here.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)

    session_uuid = acc_ops.login_as_admin()

    vm_creation_option.set_session_uuid(session_uuid)

    vm = test_vm_header.ZstackTestVm()
    vm_creation_option.set_l3_uuids([l3net_uuid])

    while vm_num > 0:
        check_thread_exception()
        vm_name = '%s_%s' % (vm_name_prefix, str(vm_num))
        vm_creation_option.set_name(vm_name)
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        # Throttle: never keep more than thread_threshold workers alive.
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()

    # Wait for all worker threads to drain (only the main thread remains).
    while threading.active_count() > 1:
        time.sleep(0.05)

    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # Restore the original session settings and drop the admin session.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
def test():
    """Create a VM from an ISO, clone it, delete and expunge the ISO, then
    run attach/migrate ops on the clone to verify the cloned VM no longer
    depends on the source ISO."""
    # Skip ceph on CentOS 7.4: libvirt there cannot hotplug an ISO into a
    # VM on ceph storage (https://bugzilla.redhat.com/show_bug.cgi?id=1541702).
    cmd = "cat /etc/redhat-release | grep '7.4'"
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    rsp = test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password', cmd, 180)
    # rsp is False when the grep found no '7.4' match — presumably the
    # helper returns False on command failure; TODO confirm its contract.
    if rsp != False:
        ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
        for i in ps:
            if i.type == 'Ceph':
                test_util.test_skip('cannot hotplug iso to the vm in ceph,it is a libvirt bug:https://bugzilla.redhat.com/show_bug.cgi?id=1541702.')

    global iso
    global test_obj_dict

    # run condition: needs an image-store or ceph backup storage
    allow_bs_list = [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)

    # the migrate op at the end needs at least two hosts
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip("skip for host_num is not satisfy condition host_num>1")

    # add iso and create vm from iso (10 GiB root disk, 2 cpu / 1 GiB vm offering)
    iso = test_stub.add_test_minimal_iso('minimal_iso')
    test_obj_dict.add_image(iso)
    root_volume_offering = test_stub.add_test_root_volume_offering('root-disk-iso', 10737418240)
    test_obj_dict.add_disk_offering(root_volume_offering)
    vm_offering = test_stub.add_test_vm_offering(2, 1024*1024*1024, 'iso-vm-offering')
    test_obj_dict.add_instance_offering(vm_offering)
    vm = test_stub.create_vm_with_iso_for_test(vm_offering.uuid, iso.image.uuid, root_volume_offering.uuid, 'iso-vm')
    test_obj_dict.add_vm(vm)

    # check vm: wait until the guest is reachable over ssh
    vm_inv = vm.get_vm()
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)

    # clone vm
    cloned_vm_name = ['cloned_vm']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)

    # delete iso
    iso.delete()

    # vm ops test: the clone must still support volume attach
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_ATTACH")

    # expunge iso
    iso.expunge()

    # detach iso from the source vm
    img_ops.detach_iso(vm.vm.uuid)

    # vm ops test: the clone must still be migratable without the iso
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_MIGRATE")

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
def path():
    """Build a robot-test action path mixing SharedBlock (san) and Ceph
    primary storages: volume create/attach/detach, snapshots, resize,
    clone, migrate and cleanup.

    Returns a dict with the initial formation template and the action
    list; the list is empty unless both a SharedBlock and a Ceph primary
    storage are available (Enabled + Connected).
    """
    cond = res_ops.gen_query_conditions('state', '=', "Enabled")
    cond = res_ops.gen_query_conditions('status', '=', "Connected", cond)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    # NOTE(review): imagestore/ceph_bs are queried but never used below —
    # possibly leftovers; confirm before removing.
    cond_imagestore = res_ops.gen_query_conditions('type', '=', "ImageStoreBackupStorage", cond)
    cond_ceph = res_ops.gen_query_conditions('type', '=', "Ceph", cond)
    imagestore = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_imagestore)
    ceph_bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_ceph)
    # Partition primary storages (and one utility VM per storage) by type.
    san_ps = [ps.uuid for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_ps = [ps.uuid for ps in ps_inv if ps.type == 'Ceph']
    san_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'Ceph']
    vm2 = VM('vm2')

    if san_ps and ceph_ps:
        return dict(initial_formation="template3",
                    path_list=[[TestAction.create_volume, "ceph_volume1", "=ps_uuid::%s" % ceph_ps[0]],
                               [TestAction.attach_volume, san_vms[-1].name, "ceph_volume1"],
                               [TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
                               [TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp1'],
                               [TestAction.resize_volume, san_vms[-1].name, 5*1024*1024],
                               [TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp2'],
                               san_vms[-1].stop,
                               [TestAction.reinit_vm, san_vms[-1].name],
                               san_vms[-1].start,
                               san_vms[-1].migrate,
                               [TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot2"],
                               san_vms[-1].clone(4),
                               [TestAction.detach_volume, "ceph_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "ceph_volume1"],
                               [TestAction.detach_volume, "ceph_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "ceph_volume1"],
                               [TestAction.detach_volume, "ceph_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "ceph_volume1"],
                               [TestAction.detach_volume, "ceph_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "ceph_volume1"],
                               [TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp3'],
                               # NOTE(review): duplicate snapshot name
                               # 'ceph_volume1_snapshot1' (also created near the
                               # top of this path) — confirm this is intended.
                               [TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
                               [TestAction.create_volume, "san_shared_volume1", "=ps_uuid::%s,scsi,shareable" % random.choice(san_ps)],
                               ceph_vms[0].migrate,
                               ceph_vms[0].clone(4),
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "san_shared_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "san_shared_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "san_shared_volume1"],
                               [TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "san_shared_volume1"],
                               [TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[0] + '-root', san_vms[-1].cloned_name_list[0] + '-sp1'],
                               [TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[1] + '-root', san_vms[-1].cloned_name_list[1] + '-sp1'],
                               [TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[2] + '-root', san_vms[-1].cloned_name_list[2] + '-sp1'],
                               [TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[3] + '-root', san_vms[-1].cloned_name_list[3] + '-sp1'],
                               [TestAction.delete_volume, "ceph_volume1"],
                               [TestAction.delete_vm, san_vms[-1].cloned_name_list[0]],
                               [TestAction.delete_vm, san_vms[-1].cloned_name_list[1]],
                               [TestAction.delete_vm, san_vms[-1].cloned_name_list[2]],
                               [TestAction.delete_vm, san_vms[-1].cloned_name_list[3]],

                               ])
    else:
        return dict(initial_formation="template3", path_list=[])
def test():
    """On local storage, cold-migrate a stopped VM that has an ISO attached
    and verify the VM checks out and restarts afterwards."""
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    # Cold volume migration below only applies to local storage.
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    mn = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0]
    # Drop a fake iso file into the MN's tomcat web root so it can be
    # added over http below.
    cmd = "echo fake iso for test only >  %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso" % (os.environ.get('zstackInstallPath'))
    # If this process runs on the MN host, write locally; otherwise find
    # the host whose management (or secondary) ip matches the MN hostname
    # and write the file there over ssh.
    if os.system("ip r | grep %s" % (mn.hostName)) == 0:
        os.system(cmd)
    else:
        for host in test_lib.lib_get_all_hosts_from_plan():
            test_util.test_logger("host.managementIp_: %s" %(host.managementIp_))
            test_util.test_logger("mn.hostName: %s" %(mn.hostName))
            test_util.test_logger("anotherIp: %s" %(test_stub.get_another_ip_of_host(host.managementIp_, host.username_, host.password_)))
            if host.managementIp_ == mn.hostName or test_stub.get_another_ip_of_host(host.managementIp_, host.username_, host.password_) == mn.hostName:
                out = test_lib.lib_execute_ssh_cmd(host.managementIp_, host.username_, host.password_, cmd, timeout=30)

    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (mn.hostName))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)

    test_obj_dict.add_image(image)

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)

    test_util.test_dsc('Migrate VM')
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    # Cold migration: move the stopped VM's root volume to another host.
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()

    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success When Attach ISO')
def test():
    """NFS + separated storage network: exercise an HA 'NeverStop' VM while
    the host's storage nic flaps.

    Creates a VM on a host that is not the MN host, hosts no VR and is not
    the NFS server, sets HA level to NeverStop, then asynchronously
    downs/ups the storage nic (zsn1) on that host.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    # Only meaningful on NFS primary storage.
    allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    # Requires a dedicated storage network in the scenario config.
    test_stub.skip_if_not_storage_network_separate(test_lib.all_scenario_config)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    # Place the VM on a connected, enabled host other than the MN host.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    # The VM must not share a host with a VR, the MN, or the NFS server;
    # ensure_vm_not_on relocates it if needed.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    #test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    # Flap the storage nic (zsn1) in the background for up to 1200s.
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    t = test_stub.async_exec_ifconfig_nic_down_up(1200, host_ip, host_username, host_password, "zsn1")

    vm.destroy()

    test_util.test_pass('Test VM ha change to running within 300s Success')
def recover_vm_backup_from_remote(vm_obj):
    """Pop a random backup group from the module-level backup_list and sync
    it from its source backup storage to a different image-store backup
    storage (vm_obj is accepted for interface parity but unused)."""
    # Remove a random group from the pool, then pick one backup from it.
    group_index = random.randint(0, len(backup_list) - 1)
    chosen_group = backup_list.pop(group_index)
    backup_groupuuid = random.choice(chosen_group).groupUuid

    # Source BS: wherever the chosen backup currently lives.
    cond = res_ops.gen_query_conditions("groupUuid", '=', backup_groupuuid)
    src = res_ops.query_resource(res_ops.VOLUME_BACKUP, cond)[0].backupStorageRefs[0].backupStorageUuid

    # Destination BS: any image-store backup storage different from src.
    cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
    dst = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    if src == dst:
        dst = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[1].uuid

    vol_ops.recover_vm_backup_from_remote(backup_groupuuid, src, dst)
# Example #31
def test():
    """Share/revoke admin resources (including a vxlan pool) with an IAM2
    project and with all accounts.

    Flow: create a vxlan pool + vni range, create a project and a project
    operator, log into the project, share admin resources and consume them
    (vm, volume, l2 vxlan network, vpc vrouter), revoke the shares (per
    project, then share-to-all / revoke-from-all round trips), verify the
    revoked resources can no longer be consumed, and clean up.
    """
    global linked_account_uuid, project_uuid, project_operator_uuid, account_lists, vni_range_uuid, vxlan_pool_uuid, l2_vxlan_network_uuid, account1_uuid, account2_uuid

    # create vxlan pool and vni range
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    cluster_uuid = res_ops.get_resource(res_ops.CLUSTER)[0].uuid
    vxlan_pool_name = 'vxlan_pool_name'

    vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool(
        vxlan_pool_name, zone_uuid).uuid
    vxlan_ops.create_vni_range('vni_range', 20, 40, vxlan_pool_uuid)

    systemTags = [
        "l2NetworkUuid::%s::clusterUuid::%s::cidr::{172.20.0.1/16}" %
        (vxlan_pool_uuid, cluster_uuid)
    ]
    net_ops.attach_l2_vxlan_pool(vxlan_pool_uuid, cluster_uuid, systemTags)

    # 1 create project
    project_name = 'test_share_project1'
    project = iam2_ops.create_iam2_project(project_name)
    project_uuid = project.uuid
    # a project is backed by an account of the same name
    cond = res_ops.gen_query_conditions("name", '=', "test_share_project1")
    linked_account_uuid = res_ops.query_resource(res_ops.ACCOUNT, cond)[0].uuid

    # 2 create project operator
    project_operator_name = 'share_username1'
    project_operator_password = '******'
    attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
    project_operator_uuid = iam2_ops.create_iam2_virtual_id(
        project_operator_name,
        project_operator_password,
        attributes=attributes).uuid
    # 3 login in project by project operator
    iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid],
                                             project_uuid)
    project_operator_session_uuid = iam2_ops.login_iam2_virtual_id(
        project_operator_name, project_operator_password)
    project_login_uuid = iam2_ops.login_iam2_project(
        project_name, session_uuid=project_operator_session_uuid).uuid
    # todo:use the shared resources

    # 4 share admin resources to project
    test_stub.share_admin_resource_include_vxlan_pool([linked_account_uuid])
    # use the shared resources to create vm
    vm = test_stub.create_vm(session_uuid=project_login_uuid)
    volume = test_stub.create_volume(session_uuid=project_login_uuid)
    test_obj_dict.add_volume(volume)
    test_obj_dict.add_vm(vm)
    l2_vxlan_network_uuid = vxlan_ops.create_l2_vxlan_network(
        'l2_vxlan',
        vxlan_pool_uuid,
        zone_uuid,
        session_uuid=project_login_uuid).uuid
    virtual_router_offering_uuid = res_ops.get_resource(
        res_ops.VR_OFFERING)[0].uuid
    vpc_ops.create_vpc_vrouter('vpc_router',
                               virtual_router_offering_uuid,
                               session_uuid=project_login_uuid)

    # 5 revoke admin resources from project
    test_stub.revoke_admin_resource([linked_account_uuid])

    # 6 share to all
    #create_account
    account1_uuid = acc_ops.create_account('user1', 'password', 'Normal').uuid
    account2_uuid = acc_ops.create_account('user2', 'password', 'Normal').uuid

    account_lists = res_ops.query_resource(res_ops.ACCOUNT)
    for account in account_lists:
        test_stub.share_admin_resource_include_vxlan_pool([account.uuid])

    # 7 revoke resources from all
    for account in account_lists:
        test_stub.revoke_admin_resource([account.uuid])

    # 8 Negative test: every creation below must fail because the shares
    # were revoked. Excepts are narrowed from bare 'except:' so unrelated
    # fatal exceptions are not mistaken for the expected API error.
    test_util.test_dsc(
        'Doing negative test.Try to use the resources not shared to create vm')
    try:
        test_stub.create_vm(session_uuid=project_login_uuid)
    except Exception:
        test_util.test_logger(
            'Catch excepted excepttion.can not use the resources not shared to create vm'
        )
    else:
        test_util.test_fail(
            'Catch wrong logic:create vm success with the resources not shared '
        )

    test_util.test_dsc(
        'Doing negative test.Try to use the resources not shared to create volume'
    )
    try:
        test_stub.create_volume(session_uuid=project_login_uuid)
    except Exception:
        test_util.test_logger(
            'Catch excepted excepttion.can not use the resources not shared to create volume'
        )
    else:
        test_util.test_fail(
            'Catch wrong logic:create volume success with the resources not shared '
        )

    test_util.test_dsc(
        'Doing negative test.Try to use the resources not shared to create vxlan network'
    )
    try:
        vxlan_ops.create_l2_vxlan_network('l2_vxlan',
                                          vxlan_pool_uuid,
                                          zone_uuid,
                                          session_uuid=project_login_uuid)
    except Exception:
        test_util.test_logger(
            'Catch excepted excepttion.can not use the resources not shared to create l2 vxlan'
        )
    else:
        test_util.test_fail(
            'Catch wrong logic:create l2 vxlan success with the resources not shared '
        )

    test_util.test_dsc(
        'Doing negative test.Try to use the resources not shared to create vpc_vrouter '
    )
    try:
        # BUG FIX: this used the undefined name 'virtual_router_offerings';
        # the resulting NameError was swallowed by the except clause, so
        # the negative case passed without exercising the permission check.
        vpc_ops.create_vpc_vrouter('vpc_router',
                                   virtual_router_offering_uuid,
                                   session_uuid=project_login_uuid)
    except Exception:
        test_util.test_logger(
            'Catch excepted excepttion.can not use the resources not shared to create vpc_router'
        )
    else:
        test_util.test_fail(
            'Catch wrong logic:create vpc_router success with the resources not shared '
        )

    # 9 delete
    acc_ops.logout(project_login_uuid)
    iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
    iam2_ops.delete_iam2_project(project_uuid)
    iam2_ops.expunge_iam2_project(project_uuid)
    vni_range_uuid = res_ops.get_resource(res_ops.VNI_RANGE)[0].uuid
    vxlan_ops.delete_vni_range(vni_range_uuid)
    vpc_ops.remove_all_vpc_vrouter()
    test_lib.lib_error_cleanup(test_obj_dict)

    # NOTE(review): the pool is deleted before the vxlan network created
    # from it — confirm the API tolerates this order.
    net_ops.delete_l2(vxlan_pool_uuid)
    net_ops.delete_l2(l2_vxlan_network_uuid)

    acc_ops.delete_account(account1_uuid)
    acc_ops.delete_account(account2_uuid)
def test():
    test_util.test_dsc("Test Stack template Apis")

    cond = res_ops.gen_query_conditions('name', '=', "test")
    stack_template_queried = res_ops.query_resource(res_ops.STACK_TEMPLATE,
                                                    cond)
    if len(stack_template_queried) != 0:
        stack_template_ops.delete_stack_template(
            stack_template_queried[0].uuid)

    stack_template_option = test_util.StackTemplateOption()
    stack_template_option.set_name("test")
    templateContent = '''
{
  "ZStackTemplateFormatVersion": "2018-06-18",
  "Description": "Test ZStack formation functions",
  "Parameters": {
    "4TestBoolean2": {
      "Type": "Boolean",
      "Description": "测试boolean ",
      "DefaultValue": false
    },
    "ZoneUuid": {
      "Type": "String",
      "Description": "测试boolean ",
      "DefaultValue": "zoneuuid"
    },
    "ClusterUuid": {
      "Type": "String",
      "DefaultValue": "clusteruuid"
    },
    "PrimaryStorageUuidForRootVolume": {
      "Type": "String",
      "Description": "主存储Uuid ",
      "DefaultValue": "primarystorageuuidforrootvolume"
    },
    "ImageUrl": {
      "Type": "String",
      "Description": "镜像地址",
      "DefaultValue": "http://test.zss.com/testimage.qcow2"
    },
    "BackupStorages": {
      "Type": "CommaDelimitedList",
      "Description": "所有镜像服务器",
      "DefaultValue": "BS1,BS2,BS3"
    },
    "Vlan": {
      "Type": "Number",
      "Description": "Vlan id",
      "DefaultValue": 1000
    },
    "VipPortStart": {
      "Type": "Number",
      "Description": "Vip port start num",
      "DefaultValue": 22
    },
    "LoadBalancerPort": {
      "Type": "Number",
      "Description": "load balancer port",
      "DefaultValue": 443
    },
    "PeerAddress": {
      "Type": "String",
      "DefaultValue": "192.168.200.100"
    },
    "AuthKey": {
      "Type": "String",
      "DefaultValue": "testAuthKey"
    },
    "L2Interface": {
      "Type": "String",
      "DefaultValue": "eth0"
    },
    "StartIp":{
      "Type":"String",
      "DefaultValue":"192.168.200.2"
    },
    "EndIp":{
      "Type":"String",
      "DefaultValue":"192.168.200.200"
    },
    "Netmask":{
      "Type":"String",
      "DefaultValue":"255.255.255.0"
    },
    "Gateway":{
      "Type":"String",
      "DefaultValue":"198.168.0.1"
    },
    "Dns":{
      "Type":"String",
      "DefaultValue":"114.114.114.114"
    },
    "NetworkCidr":{
      "Type":"String",
      "DefaultValue":"192.168.10.0/24"
    },
    "PeerCidrs":{
      "Type": "CommaDelimitedList",
      "Description": "PeerCidra",
      "DefaultValue": "192.168.23.0/24"
    },
    "Destination":{
      "Type":"String",
      "DefaultValue":"192.168.2.0/24"
    },
    "Prefix":{
      "Type":"String",
      "DefaultValue":"169.254.169.254/32"
    },
    "Nexthop":{
      "Type":"String",
      "DefaultValue":"192.168.1.254"
    },
    "UsbDeviceUuid":{
      "Type":"String",
      "DefaultValue":"usbDeviceUuid"
    },
    "PciDeviceUuid":{
      "Type":"String",
      "DefaultValue":"pciDeviceUuid"
    }
  },
  "Mappings": {
    "names": {
      "instanceOffering": {
        "name1": "test-just-t",
        "name2": "test2"
      }
    },
    "JustForTest": {
      "test": "I am valid!"
    },
    "JustForTest2": {
      "test": "I am valid!",
      "test2": "I am valid too!"
    }
  },
  "Resources": {
    "InstanceOffering": {
      "Type": "ZStack::Resource::InstanceOffering",
      "Properties": {
        "name": {
          "Fn::Join": [
            "-",
            [
              "a",
              "b",
              "ccc"
            ]
          ]
        },
        "description":"测试创建计算规格",
        "cpuNum": 8,
        "memorySize": 8589934592,
        "allocatorStrategy":"Mevoco",
        "resourceUuid":"testuuid",
        "sortKey":0,
        "systemTags": [
          "userdata"
        ],
        "userTags": [
          "中文",
          "testinstanceofferingusertag"
        ],
        "type":"UserVm",
        "timeout":600
      }
    },
    "DiskOffering": {
      "Type": "ZStack::Resource::DiskOffering",
      "Properties": {
        "name": "diskoffering",
        "diskSize": 1124774006935781000,
        "sortKey": 1,
        "allocationStrategy": "DefaultPrimaryStorageAllocationStrategy",
        "resourceUuid": "DefaultDiskOfferingType",
        "type": "DefaultDiskOfferingType",
        "timeout":100,
        "systemTags": [
          "test",
          "ttt"
        ],
        "userTags": [
          "中文",
          "testdiskofferingusertag"
        ]
      }
    },
    "VM": {
      "Type": "ZStack::Resource::VmInstance",
      "Properties": {
        "name": {
          "Fn::Base64": "kubernetes-Node-1"
        },
        "instanceOfferingUuid": {
          "Fn::GetAtt": [
            "InstanceOffering",
            "uuid"
          ]
        },
        "imageUuid": {
          "Fn::FindInMap": [
            "names",
            "instanceOffering",
            "name1"
          ]
        },
        "l3NetworkUuids": ["1","2"],
        "rootDiskOfferingUuid": {
          "Fn::GetAtt": [
            "DiskOffering",
            "uuid"
          ]
        },
        "dataDiskOfferingUuids": [
          "uuid1",
          "uuid2"
        ],
        "zoneUuid": {
          "Ref": "ZoneUuid"
        },
        "clusterUuid": {
          "Ref": "ClusterUuid"
        },
        "hostUuid": "hostuuid",
        "primaryStorageUuidForRootVolume": {
          "Ref": "PrimaryStorageUuidForRootVolume"
        },
        "description": "创建一个云主机··‘’“''、$# $?",
        "defaultL3NetworkUuid": "uuid",
        "strategy":"InstantStart",
        "timeout":300,
        "systemTags": [
          "userdata"
        ],
        "userTags": [
          "Test",
          "test2",
          "中文试一下;"
        ]
      },
      "DependsOn": [
        {
          "Ref": "InstanceOffering"
        }
      ],
      "DeletionPolicy": "Retain"
    },
   "DataVolume": {
    "Type": "ZStack::Resource::DataVolume",
    "Properties": {
        "name": "testDataVolume",
        "description": "创建一个云盘!!!",
        "diskOfferingUuid": {
            "Fn::GetAtt": [
                "DiskOffering",
                "uuid"
            ]
        },
        "primaryStorageUuid": {
            "Ref": "PrimaryStorageUuidForRootVolume"
        },
        "resourceUuid": "uuid",
        "timeout": 100,
        "systemTags": [
            "test",
            "ttt"
        ],
        "userTags": [
            "Test",
            "test2",
            "中文试一下;"
        ]
      }
    },
    "Image": {
      "Type": "ZStack::Resource::Image",
      "Properties": {
        "name": "testimage",
        "description": "添加镜像,‘’‘’“”",
        "url": {
          "Ref": "ImageUrl"
        },
        "mediaType": "ISO",
        "guestOsType": "Linux",
        "system": false,
        "format": "qcow2",
        "platform": "Linux",
        "mediaType": "RootVolumeTemplate",
        "backupStorageUuids": {
          "Ref": "BackupStorages"
        },
        "resourceUuid": "testuuid",
        "timeout":600,
        "systemTags": [
          "imagesystemtags",
          "imagestsytemtages2"
        ],
        "userTags": [
          "imageusertages1",
          "imageusertages2",
          "中文试一下"
        ]
      }
    },
    "AffinityGroup": {
      "Type": "ZStack::Resource::AffinityGroup",
      "Properties": {
        "name": "testAffinityGroup",
        "description":"create one 亲和组 ",
        "policy": "antiSoft",
        "type": "host",
        "resourceUuid": "affinitygroupuuid",
        "timeout": 100,
        "systemTags": ["testsystemTags"],
        "userTags": ["用户标签"]
      }
    },
    "L2VxlanNetworkPool": {
      "Type": "ZStack::Resource::L2VxlanNetworkPool",
      "Properties": {
        "name": "testl2vxlannetworkpool",
        "description":"一个vxlanpool",
        "type":"test",
        "zoneUuid": {
          "Ref": "ZoneUuid"
        },
        "physicalInterface": {
          "Ref": "L2Interface"
        },
        "timeout":100,
        "resourceUuid": "testl2vxlannetworkpooluuid",
        "systemTags": [
          "l2vxlanpoolsystemtags",
          "l2vxlanpoolsystemtages2"
        ],
        "userTags": [
          "l2vxlanpoolsertages1",
          "l2vxlanpoolertages2",
          "中文试一下;"
        ]
      }
    },
    "L2NoVlanNetwork": {
      "Type": "ZStack::Resource::L2NoVlanNetwork",
      "Properties": {
        "name": "testl2novlannetwork",
        "description": "Novlan 二层网络",
        "resourceUuid":"testuuid",
        "zoneUuid": {
          "Ref": "ZoneUuid"
        },
        "physicalInterface": "eth0",
        "type":"test",
        "timeout":200,
        "systemTags": [
          "l2novlansystemtags",
          "l2novlansystemtages2"
        ],
        "userTags": [
          "l2novlansertages1",
          "l2novlanpoolertages2",
          "中文试一下;"
        ]
      }
    },
    "L3Network":{
      "Type":"ZStack::Resource::L3Network",
      "Properties":{
        "name":"testl3network",
        "l2NetworkUuid":"uuid"
      }
    },
    "VRouterRouteTable":{
      "Type":"ZStack::Resource::VRouterRouteTable",
      "Properties":{
        "name":"testVrouterRouTable"
      }
    },
    "SecurityGroup":{
      "Type":"ZStack::Resource::SecurityGroup",
      "Properties":{
        "name":"testsecurityGroup"
      }
    },
    "LoadBalancer":{
      "Type":"ZStack::Resource::LoadBalancer",
      "Properties":{
        "name":"testLoadBalancer",
        "vipUuid":{"Fn::GetAtt":["Vip","uuid"]}
      }
    },
    "LoadBalancerListener":{
      "Type":"ZStack::Resource::LoadBalancerListener",
      "Properties":{
        "name":"testLocaBalancerListerner",
        "loadBalancerUuid":{"Fn::GetAtt":["LoadBalancer","uuid"]},
        "loadBalancerPort": {"Ref":"LoadBalancerPort"}
      }
    },
    "PortForwardingRule":{
      "Type":"ZStack::Resource::PortForwardingRule",
      "Properties":{
        "name":"testPortForwardingRule",
        "vipUuid":{"Fn::GetAtt":["Vip","uuid"]},
        "vipPortStart":{"Ref":"VipPortStart"},
        "protocolType":"TCP"
      }
    },
    "Vip":{
      "Type":"ZStack::Resource::Vip",
      "Properties":{
        "name":"testvip",
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]}
      }
    },
    "Eip":{
      "Type":"ZStack::Resource::Eip",
      "Properties":{
        "name":"testeip",
        "vipUuid":{"Fn::GetAtt":["Vip","uuid"]}
      }
    },
    "IPsecConnection": {
      "Type":"ZStack::Resource::IPsecConnection",
      "Properties":{
        "name":"testIPsecConnection",
        "peerAddress":{"Ref":"PeerAddress"},
        "authKey":{"Ref":"AuthKey"},
        "vipUuid":{"Fn::GetAtt":["Vip","uuid"]}
      }
    },
    "AddIpRange":{
      "Type":"ZStack::Action::AddIpRange",
      "Properties":{
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "name":"TestIpRange",
        "description":"iprange",
        "startIp":"192.168.23.1",
        "endIp":{"Ref":"EndIp"},
        "netmask":{"Ref":"Netmask"},
        "gateway":{"Ref":"Gateway"},
        "resourceUuid":"testuuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddDnsToL3Network":{
      "Type":"ZStack::Action::AddDnsToL3Network",
      "Properties":{
         "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
         "dns":{"Ref":"Dns"},
         "timeout":200,
         "systemTags": ["test"],
         "userTags": ["test"]
      }
    },
    "AddSecurityGroupRule":{
      "Type":"ZStack::Action::AddSecurityGroupRule",
      "Properties":{
        "securityGroupUuid":{"Fn::GetAtt":["SecurityGroup","uuid"]},
        "rules":[
        {
          "type":"Ingress",
          "startPort":22,
          "endPort":22,
          "protocol":"TCP",
          "allowedCidr":"0.0.0.0/0"
        }
        ],
        "remoteSecurityGroupUuids":["testuuid"],
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddVmToAffinityGroup":{
      "Type":"ZStack::Action::AddVmToAffinityGroup",
      "Properties":{
        "affinityGroupUuid":{"Fn::GetAtt":["AffinityGroup","uuid"]},
        "uuid":{"Fn::GetAtt":["VM","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddVRouterRouteEntry":{
      "Type":"ZStack::Action::AddVRouterRouteEntry",
      "Properties":{
        "routeTableUuid":{"Fn::GetAtt":["VRouterRouteTable","uuid"]},
        "destination":{"Ref":"Destination"},
        "type":"UserStatic",
        "routeTableUuid":"routeTableUuid",
        "description":"test",
        "target":"10.141.23.2",
        "distance":1,
        "resourceUuid":"testuuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddIpRangeByNetworkCidr":{
      "Type":"ZStack::Action::AddIpRangeByNetworkCidr",
      "Properties":{
        "name":"Test-IpRange",
        "description":"111",
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "networkCidr":{"Ref":"NetworkCidr"},
        "resourceUuid":"addiprangeuuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddVmNicToLoadBalancer":{
      "Type":"ZStack::Action::AddVmNicToLoadBalancer",
      "Properties":{
        "vmNicUuids":["testuuid"],
        "listenerUuid":{"Fn::GetAtt":["LoadBalancerListener","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddVmNicToSecurityGroup":{
      "Type":"ZStack::Action::AddVmNicToSecurityGroup",
      "Properties":{
        "securityGroupUuid":{"Fn::GetAtt":["SecurityGroup","uuid"]},
        "vmNicUuids":["testuuid"],
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddRemoteCidrsToIPsecConnection":{
      "Type":"ZStack::Action::AddRemoteCidrsToIPsecConnection",
      "Properties":{
        "uuid":{"Fn::GetAtt":["IPsecConnection","uuid"]},
        "peerCidrs":{"Ref":"PeerCidrs"},
        "resourceUuid":"addiprangeuuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]

      }
    },
    "AttachEip":{
      "Type":"ZStack::Action::AttachEip",
      "Properties":{
        "eipUuid":{"Fn::GetAtt":["Eip","uuid"]},
        "vmNicUuid":"testuuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachDataVolumeToVm":{
      "Type":"ZStack::Action::AttachDataVolumeToVm",
      "Properties":{
        "vmInstanceUuid":{"Fn::GetAtt":["VM","uuid"]},
        "volumeUuid":{"Fn::GetAtt":["DataVolume","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachPortForwardingRule":{
      "Type":"ZStack::Action::AttachPortForwardingRule",
      "Properties":{
        "ruleUuid":{"Fn::GetAtt":["PortForwardingRule","uuid"]},
        "vmNicUuid":{"Fn::Select": ["0", ["foo; bar; achoo"]]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]

      }
    },
    "AttachIsoToVmInstance":{
      "Type":"ZStack::Action::AttachIsoToVmInstance",
      "Properties":{
        "vmInstanceUuid":{"Fn::GetAtt":["VM","uuid"]},
        "isoUuid":"isouuid",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachPciDeviceToVm":{
      "Type":"ZStack::Action::AttachPciDeviceToVm",
      "Properties":{
        "pciDeviceUuid":{"Ref":"PciDeviceUuid"},
        "vmInstanceUuid":{"Fn::GetAtt":["VM","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachUsbDeviceToVm":{
      "Type":"ZStack::Action::AttachUsbDeviceToVm",
      "Properties":{
        "usbDeviceUuid":{"Ref":"UsbDeviceUuid"},
        "vmInstanceUuid":{"Fn::GetAtt":["VM","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachL2NetworkToCluster":{
      "Type":"ZStack::Action::AttachL2NetworkToCluster",
      "Properties":{
        "l2NetworkUuid":{"Fn::GetAtt":["L2NoVlanNetwork","uuid"]},
        "clusterUuid":{"Ref":"ClusterUuid"},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachL3NetworkToVm":{
      "Type":"ZStack::Action::AttachL3NetworkToVm",
      "Properties":{
        "vmInstanceUuid":{"Fn::GetAtt":["VM","uuid"]},
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "staticIp":"10.141.23.22",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachNetworkServiceToL3Network":{
      "Type":"ZStack::Action::AttachNetworkServiceToL3Network",
      "Properties":{
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "networkServices":{"1":"a","2":"b"},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachSecurityGroupToL3Network":{
      "Type":"ZStack::Action::AttachSecurityGroupToL3Network",
      "Properties":{
        "securityGroupUuid":{"Fn::GetAtt":["SecurityGroup","uuid"]},
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AttachVRouterRouteTableToVRouter":{
      "Type":"ZStack::Action::AttachVRouterRouteTableToVRouter",
      "Properties":{
        "routeTableUuid":{"Fn::GetAtt":["VRouterRouteTable","uuid"]},
        "virtualRouterVmUuid":"lei",
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddCertificateToLoadBalancerListener":{
      "Type":"ZStack::Action::AddCertificateToLoadBalancerListener",
      "Properties":{
        "certificateUuid":"lei",
        "listenerUuid":{"Fn::GetAtt":["LoadBalancerListener","uuid"]},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    },
    "AddHostRouteToL3Network":{
      "Type":"ZStack::Action::AddHostRouteToL3Network",
      "Properties":{
        "l3NetworkUuid":{"Fn::GetAtt":["L3Network","uuid"]},
        "prefix":{"Ref":"Prefix"},
        "nexthop":{"Ref":"Nexthop"},
        "timeout":200,
        "systemTags": ["test"],
        "userTags": ["test"]
      }
    }

  },
  "Outputs": {
    "InstanceOffering": {
      "Value": {
        "Ref": "InstanceOffering"
      }
    },
    "IP": {
      "Value": {
        "Fn::Select": [
          "0",
          [
            "ip",
            "11",
            "test"
          ]
        ]
      }
    }
  }
}


'''
    parameter = '''
{
    "TestStringBasicEcho": "Just a string Possiple"
}
'''

    stack_template_option.set_templateContent(templateContent)
    #preview_resource_stack = resource_stack_ops.preview_resource_stack(stack_template_option)
    try:
        preview_resource_stack(templateContent, parameter)
    except:
        test_util.test_fail('error')

    stack_template = stack_template_ops.add_stack_template(
        stack_template_option)

    cond = res_ops.gen_query_conditions('uuid', '=', stack_template.uuid)
    stack_template_queried = res_ops.query_resource(res_ops.STACK_TEMPLATE,
                                                    cond)
    if len(stack_template_queried) == 0:
        test_util.test_fail("Fail to query stack template")

    test_util.test_pass('Create Stack Template Test Success')
def test():
    """Stress-test volume snapshots on a utility VM's root volume.

    Workflow:
      1. Raise session timeout/concurrency so ~100 parallel API calls
         do not exhaust management-node sessions.
      2. Create a VM and register its root volume as the snapshot target.
      3. Create 100 snapshots concurrently and verify all 100 exist.
      4. Concurrently delete and revert randomly chosen snapshots.
      5. For NFS/local storage, verify each remaining snapshot's install
         path exists on the host; then clean up.
    """
    global session_to
    global session_mc

    # Widen limits before spawning ~100 concurrent API threads.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    #use root volume to skip add_checking_point
    test_util.test_dsc('Use root volume for snapshot testing')
    root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume = zstack_volume_header.ZstackTestVolume()
    root_volume.set_volume(root_volume_inv)
    root_volume.set_state(volume_header.ATTACHED)
    root_volume.set_target_vm(vm)
    test_obj_dict.add_volume(root_volume)
    vm.check()

    snapshots = test_obj_dict.get_volume_snapshot(root_volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)

    # Fire ori_num snapshot-creation threads concurrently (indices 1..100).
    ori_num = 100
    for index in range(1, ori_num + 1):
        thread = threading.Thread(target=create_snapshot, args=(snapshots, index,))
        thread.start()

    # Wait until only the main thread remains.
    while threading.activeCount() > 1:
        time.sleep(0.1)

    cond = res_ops.gen_query_conditions('volumeUuid', '=', root_volume.get_volume().uuid)
    sps_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond)

    if sps_num != ori_num:
        test_util.test_fail('Create %d snapshots, but only %d snapshots were successfully created' % (ori_num, sps_num))

    # Race deletes against reverts on random snapshots to shake out
    # concurrency bugs in snapshot bookkeeping.
    test_num = 100
    snapshot_list = snapshots.get_snapshot_list()
    for index in range(test_num):
        thread_1 = threading.Thread(target=snapshots.delete_snapshot, args=(random.choice(snapshot_list),))
        thread_2 = threading.Thread(target=snapshots.use_snapshot, args=(random.choice(snapshot_list),))
        thread_1.start()
        thread_2.start()

    while threading.activeCount() > 1:
        time.sleep(0.1)

    #snapshot.check() doesn't work for root volume
    #snapshots.check()
    #check if snapshot exists in ps (host) install_path
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE or ps.type == inventory.LOCAL_STORAGE_TYPE:
        cond = res_ops.gen_query_conditions('volumeUuid', '=', root_volume.get_volume().uuid)
        sps_in_database = res_ops.query_resource(res_ops.VOLUME_SNAPSHOT, cond)
        host = test_lib.lib_get_vm_host(vm.get_vm())
        for snapshot_inv in sps_in_database:
            sp_ps_install_path = snapshot_inv.primaryStorageInstallPath
            if test_lib.lib_check_file_exist(host, sp_ps_install_path):
                test_util.test_logger('Check result: snapshot %s is found in host %s in path %s' % (snapshot_inv.name, host.managementIp, sp_ps_install_path))
            else:
                test_lib.lib_robot_cleanup(test_obj_dict)
                test_util.test_fail('Check result: snapshot %s is not found in host %s in path %s' % (snapshot_inv.name, host.managementIp, sp_ps_install_path))
    else:
        test_util.test_logger('Skip check file install path for %s primary storage' % (ps.type))

    try:
        test_lib.lib_robot_cleanup(test_obj_dict)
    except:
        # Cleanup is best-effort here; VM deletion may legitimately time out.
        # Fixed: was test_lib.test_logger, which does not exist (logging lives
        # in test_util throughout this file) and would raise AttributeError.
        test_util.test_logger('Delete VM may timeout')
    test_util.test_pass('Test delete and revert 100 snapshots simultaneously success')
def test():
    """Suite setup: deploy scenario, create vlan devices, seed the database.

    Deploys (or destroys) the scenario environment as configured, creates the
    vlan ethernet devices needed by later vlan/novlan test cases on both the
    local machine and every planned host, then deploys the initial database
    and sets direct-delete policies.
    """
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(
            test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config,
                                            test_lib.scenario_file,
                                            test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config,
                                             test_lib.scenario_destroy)

    # Under a deployed scenario the hosts use the zsn0 nic; bare metal uses eth0.
    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(
            test_lib.scenario_file):
        nic_name = "zsn0"
    public_l2_vlan = int(os.getenv('l2PublicVlan'))
    #This vlan creation is not a must, if testing is under nested virt env. But it is required on physical host without enough physcial network devices and your test execution machine is not the same one as Host machine.
    #linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
    #linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
    #no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)
    linux.create_vlan_eth(nic_name, public_l2_vlan)

    #If test execution machine is not the same one as Host machine, deploy work is needed to separated to 2 steps(deploy_test_agent, execute_plan_without_deploy_test_agent). And it can not directly call SetupAction.run()
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()

    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    # Create the same three vlan devices on every planned host via the test
    # agent (the three copy-pasted posts are folded into one loop).
    for host in hosts:
        cmd.ethname = nic_name
        for vlan in (10, 11, public_l2_vlan):
            cmd.vlan = vlan
            http.json_dump_post(
                testagent.build_http_path(host.managementIp_,
                                          host_plugin.CREATE_VLAN_DEVICE_PATH),
                cmd)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(
            test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(
            test_lib.all_scenario_config, test_lib.scenario_file,
            test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % (EXTRA_SUITE_SETUP_SCRIPT))

    deploy_operations.deploy_initial_database(test_lib.deploy_config,
                                              test_lib.all_scenario_config,
                                              test_lib.scenario_file)

    # Direct delete so test teardown reclaims resources immediately
    # (return values were never used, so the bindings are dropped).
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_lib.lib_set_delete_policy('image', 'Direct')
    if test_lib.lib_get_ha_selffencer_maxattempts() != None:
        test_lib.lib_set_ha_selffencer_maxattempts('60')
        test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)

    # Sanity check: indexing bss[0] fails fast if no backup storage exists.
    bss = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    bs = bss[0]
    test_util.test_pass('Suite Setup Success')
# Example #35
def test():
    """Verify a NeverStop-HA VM restarts after its host is cold-stopped.

    Creates a VM on a local-storage host that is not the management node and
    hosts no virtual router, sets HA level NeverStop, cold-stops the host,
    waits for the VM to reach Stopped, recovers the host, then waits for the
    VM to begin Starting again.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    test_lib.lib_skip_if_ps_num_is_not_eq_number(2)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    # Pick an enabled, connected host that is not the management node,
    # so cold-stopping it does not kill the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    test_stub.ensure_host_has_no_vr(host_uuid)

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    # Map the host's management IP back to its scenario-file entry so the
    # scenario tooling can power it off/on.
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config,
                                        test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %
                            (host_ip))

    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')

    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Poll up to 300s for the VM to be marked Stopped, then bring the host
    # back. vm_stop_time is recorded only when the state is actually seen
    # (fixed: it was assigned every iteration, which made the None-guard
    # below dead code and left a bogus 1s second wait window on timeout;
    # the sibling none-status test already records it this way).
    for i in range(0, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Stopped":
            vm_stop_time = i
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host,
                                        test_lib.all_scenario_config,
                                        test_lib.deploy_config)
            break
        time.sleep(1)
    if vm_stop_time is None:
        # Never saw Stopped: exhaust the budget so the loop below fails fast.
        vm_stop_time = 300
    # Spend the remainder of the 300s budget waiting for HA to restart it.
    for i in range(vm_stop_time, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Starting":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within 300s.")

    vm.destroy()

    test_util.test_pass(
        'Test checking VM ha and none status when force stop vm Success.')
# Example #36
def test():
    """Verify a VM goes Unknown then Stopped when its host is cold-stopped.

    Creates a VM on a local-storage host that is not the management node and
    hosts no virtual router, cold-stops the host, waits for the VM to reach
    Unknown, recovers the host, then waits for the VM to settle at Stopped.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    # Pick an enabled, connected host that is not the management node,
    # so cold-stopping it does not kill the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_none_status')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()

    test_stub.ensure_host_has_no_vr(host_uuid)

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm2.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    # Map the host's management IP back to its scenario-file entry so the
    # scenario tooling can power it off/on.
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config,
                                        test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %
                            (host_ip))

    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')

    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_none_status')
    cond = res_ops.gen_query_conditions('uuid', '=', vm2.vm.uuid, cond)

    # Poll up to 180s for the VM to be marked Unknown, then bring the host
    # back up.
    vm_stop_time = None
    for i in range(0, 180):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Unknown":
            vm_stop_time = i
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host,
                                        test_lib.all_scenario_config,
                                        test_lib.deploy_config)
            break
        time.sleep(1)

    # Fixed: was `if not vm_stop_time`, which also matched the legitimate
    # value 0 (Unknown seen on the first poll) and wrongly emptied the
    # wait loop below, failing the test. Only None means "never seen".
    if vm_stop_time is None:
        vm_stop_time = 180

    # Spend the remainder of the 180s budget waiting for the Stopped state.
    for i in range(vm_stop_time, 180):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Stopped":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm none is not change to Stopped as expected within 180s.")

    test_util.test_pass(
        'Test checking vm none status when host has been force stop Success.')
def test():
    """Verify the management-node VM survives a separated-network outage.

    Shuts down the non-management network on every MN-capable host, checks
    the MN VM is still alive, recovers roughly half of the hosts, waits for
    the MN VM to come back on exactly one host, then exercises the recovered
    cloud by creating and destroying a basic VM.
    """
    global vm
    global mn_host_list
    # Hosts left down for the suite teardown to recover.
    global need_recover_mn_host_list
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()
    # Point the built-in HTTP server at the management network's MN IP;
    # the public IP is saved so teardown can restore it.
    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Randomly pick about half (rounded up) of the host indices to recover
    # later. NOTE(review): `(mn_host_num + 1) / 2` relies on Python 2
    # integer division — under Python 3 this would be a float and
    # random.sample would raise; confirm the suite only runs on Python 2.
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    # Take down the separated (non-management) network on every MN host.
    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host,
                                        test_lib.all_scenario_config,
                                        downMagt=False)

    # Initially every host needs recovery; indices are removed as hosts
    # are recovered below.
    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    # Since only the separated network went down, the MN VM must still be
    # running somewhere.
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail(
            'MN VM has been murdered, expected result should not be impacted when the separated network is down.'
        )

    # Recover the randomly chosen half of the hosts.
    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    # The consul leader tells us which host should be running the MN VM.
    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # Poll up to 120 * 5s for the MN VM to be running on exactly one host.
    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 120s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    # The VM being up does not mean the management service is; wait for it.
    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail(
            "management node does not recover after MN VM is running")

    # Reconfirm infrastructure connectivity before exercising the cloud.
    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    # Smoke test: the recovered MN can still create and destroy a VM.
    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
# Example #38
def test():
    """Build a 3-way IPsec mesh across three mevoco environments and verify
    connectivity.

    Creates one VM per environment, creates a VIP on each environment's public
    network, then sets up IPsec connections:
      - mevoco1 <-> mevoco2 (ipsec11 / ipsec2)
      - mevoco1 <-> mevoco3 (ipsec12 / ipsec3)
    Note both mevoco1-side connections deliberately share vip11.
    Verifies pings work, then deletes connections one by one and verifies the
    corresponding tunnels stop working while the remaining one still does.

    Side effects: mutates os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] to point
    the test framework at each management node in turn; restores mevoco1 at exit.
    """
    global mevoco1_ip
    global mevoco2_ip
    global mevoco3_ip
    global ipsec11
    global ipsec12
    global ipsec2
    global ipsec3
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    mevoco3_ip = os.environ['thirdZStackMnIp']

    # --- mevoco1: VM, public VIP and its private CIDR ---
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vip11 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    first_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # --- mevoco2: VM, public VIP and its private CIDR ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # --- mevoco3: VM, public VIP and its private CIDR ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
    test_util.test_dsc('Create test vm in mevoco3')
    vm3 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName3'))
    test_obj_dict2.add_vm(vm3)
    vm3.check()
    pri_l3_uuid3 = vm3.vm.vmNics[0].l3NetworkUuid
    vr3 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid3)[0]
    l3_uuid3 = test_lib.lib_find_vr_pub_nic(vr3).l3NetworkUuid
    vip3 = test_stub.create_vip('ipsec3_vip', l3_uuid3)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid3)
    third_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # Point vm3's default route at its virtual router's private nic so return
    # traffic goes through the IPsec tunnel.
    cond = res_ops.gen_query_conditions('l3Network.uuid', '=', pri_l3_uuid3)
    cond = res_ops.gen_query_conditions('vmInstanceUuid', '=', vr3.uuid, cond)
    vr3_pri_ip = res_ops.query_resource(res_ops.VM_NIC, cond)[0].ip
    cmd = 'route del default; route add default gw %s' %vr3_pri_ip
    os.system("sshpass -p 'password' ssh root@%s '%s'" %(vm3.vm.vmNics[0].ip, cmd))

    # --- create the IPsec connections ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create ipsec in mevoco1')
    ipsec11 = ipsec_ops.create_ipsec_connection('ipsec11', pri_l3_uuid1, vip2.get_vip().ip, '123456', vip11.get_vip().uuid, [second_zstack_cidrs])
    # ipsec12 reuses vip11 on purpose: two connections sharing one VIP.
    ipsec12 = ipsec_ops.create_ipsec_connection('ipsec12', pri_l3_uuid1, vip3.get_vip().ip, '123456', vip11.get_vip().uuid, [third_zstack_cidrs])

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2, vip11.get_vip().ip, '123456', vip2.get_vip().uuid, [first_zstack_cidrs])

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
    test_util.test_dsc('Create ipsec in mevoco3')
    ipsec3 = ipsec_ops.create_ipsec_connection('ipsec3', pri_l3_uuid3, vip11.get_vip().ip, '123456', vip3.get_vip().uuid, [first_zstack_cidrs])

    # --- positive connectivity checks over the tunnels ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip))
    if not test_lib.lib_check_ping(vm1.vm, vm3.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco3[MN:%s]' % (mevoco1_ip, mevoco3_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco2[MN:%s] could not connect to vm in mevoco1[MN:%s]' % (mevoco2_ip, mevoco1_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
    if not test_lib.lib_check_ping(vm3.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco3[MN:%s] could not connect to vm in mevoco1[MN:%s]' % (mevoco3_ip, mevoco1_ip))

    # --- delete mevoco1<->mevoco2 leg; that path must die, 1<->3 must survive ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec11.uuid)

    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm3.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco3[MN:%s]' % (mevoco1_ip, mevoco3_ip))

    # --- delete mevoco3 side; the 3->1 path must die ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
    ipsec_ops.delete_ipsec_connection(ipsec3.uuid)

    if test_lib.lib_check_ping(vm3.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        # Fixed message: this check is vm3 -> vm1, not vm1 -> vm3.
        test_util.test_fail('vm in mevoco3[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco3_ip, mevoco1_ip))

    # --- cleanup: remaining connections, VMs, restore env var ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec12.uuid)
    test_lib.lib_error_cleanup(test_obj_dict1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create Ipsec Success')
def test():
    """HA test on NFS + separated storage network: take a host's nic down/up
    and verify a NeverStop VM goes Unknown then returns to Running.

    Preconditions (skips otherwise): NFS primary storage deployed, storage
    network separated in the scenario config, and VM HA globally enabled.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    test_stub.skip_if_not_storage_network_separate(
        test_lib.all_scenario_config)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Remove all virtual routers so they don't pin the VM placement below.
    test_lib.clean_up_all_vr()

    # Pick an enabled, connected host that is NOT the management node, so
    # breaking its network doesn't take down the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    # The VM must not share a host with a VR, the MN VM, or the NFS server,
    # otherwise the nic flap would disturb more than the VM under test.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    #test_stub.test_skip('debug')
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid,
                                      vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    # Asynchronously flap the host's bridge nic for 120s; join()ed at the end.
    #test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    t = test_stub.async_exec_ifconfig_nic_down_up(120, host_ip, host_username,
                                                  host_password, "br_zsn0")

    # Phase 1: poll (up to max_time, a module-level constant) until the VM is
    # reported Unknown; record how many seconds that took.
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    for i in range(0, max_time):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Unknown":
            break
        time.sleep(1)

    if vm_stop_time is None:
        vm_stop_time = max_time

    # Phase 2: spend the remaining budget waiting for HA to bring it back to
    # Running; the for/else fires test_fail only if the loop never breaks.
    for i in range(vm_stop_time, max_time):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within %s s." %
            (max_time))

    vm.destroy()
    t.join()

    test_util.test_pass('Test VM ha change to running within 180s Success')
# Example #40
def delete_zones():
    """Remove every zone currently registered in the environment."""
    for zone_inv in res_ops.query_resource(res_ops.ZONE, []):
        zone_ops.delete_zone(zone_inv.uuid)
# Example #41
def test():
    """Create two images from VM root volumes on sftp backup storage, then
    verify that the target image's size and actualSize recorded in the
    database match what is physically on the backup storage.

    Physical sizes are read over ssh: `ls -al` for the file's actual size and
    `qemu-img info` for the virtual size.
    """
    node_ip = "localhost"
    # NOTE(review): credentials look scrubbed for publication; real values are
    # expected to be injected by the test environment.
    host_username = "******"
    host_password = "******"
    if res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE):
        bs = res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE)[0]
    else:
        test_util.test_skip("No sftp backupstorage for test. Skip test")

    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list = [l3_net_uuid]
    vm1 = test_stub.create_vm(l3_net_list, image_uuid, 'test_image_size_vm1')
    test_obj_dict.add_vm(vm1)
    vm1.stop()  #commit backup storage specified need stop vm first
    image_creation_option = test_util.ImageOption()
    image_creation_option.set_backup_storage_uuid_list([bs.uuid])
    image_creation_option.set_root_volume_uuid(vm1.vm.rootVolumeUuid)
    image_creation_option.set_name('create_vm_image_from_root_volume')
    image1 = test_image.ZstackTestImage()
    image1.set_creation_option(image_creation_option)
    image1.create()
    test_obj_dict.add_image(image1)
    image1.check()

    vm1.destroy()

    # Second generation: boot a VM from the first image and snapshot it again,
    # producing the image whose sizes we verify below.
    target_image_name = 'target_comparion_image'
    image_uuid = test_lib.lib_get_image_by_name(
        'create_vm_image_from_root_volume').uuid
    vm2 = test_stub.create_vm(l3_net_list, image_uuid, 'test_image_size_vm2')
    test_obj_dict.add_vm(vm2)
    vm2.stop()  #commit backup storage specified need stop vm first
    image_creation_option.set_root_volume_uuid(vm2.vm.rootVolumeUuid)
    image_creation_option.set_name(target_image_name)
    image2 = test_image.ZstackTestImage()
    image2.set_creation_option(image_creation_option)
    image2.create()
    test_obj_dict.add_image(image2)

    vm2.destroy()

    cond = res_ops.gen_query_conditions('backupStorageRef.backupStorage.uuid',
                                        '=', bs.uuid)
    images = res_ops.query_resource(res_ops.IMAGE, cond)
    for image in images:
        image_name = image.name
        image_uuid = image.uuid
        image_path = image.backupStorageRefs[0].installPath
        image_size = image.size
        image_actual_size = image.actualSize
        if image_name == target_image_name:
            cmd = "ls -al %s|awk '{print $5}'" % image_path
            rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username,
                                               host_password, cmd, 180)
            # ssh output is a string; convert before comparing with the
            # integer size from the database (a raw-string compare can
            # never be equal and would always fail the check).
            store_actual_size = int(rsp.rstrip())
            if store_actual_size != image_actual_size:
                test_util.test_fail(
                    'The image actual size is different. Size in database is %s, in the storage path is %s'
                    % (str(image_actual_size), str(store_actual_size)))
            cmd = "qemu-img info %s|grep 'virtual size'|awk -F'(' '{print $2}'|awk '{print $1}'" % image_path
            rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username,
                                               host_password, cmd, 180)
            store_size = int(rsp.rstrip())
            if store_size != image_size:
                test_util.test_fail(
                    'The image size is different. Size in database is %s, in the storage path is %s'
                    % (str(image_size), str(store_size)))

    image1.delete()
    image2.delete()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('imagecache cleanup Pass.')
# Example #42
 def query_bs(self):
     """Return the first backup storage inventory in the environment."""
     return res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
# Example #43
 def get_ps(self):
     """Refresh the current primary storage, then look up and cache the two
     primary storages named self.ps_1_name and self.ps_2_name as self.ps_1
     and self.ps_2.
     """
     self.get_current_ps()
     for attr_name, ps_name in (('ps_1', self.ps_1_name),
                                ('ps_2', self.ps_2_name)):
         name_cond = res_ops.gen_query_conditions('name', '=', ps_name)
         setattr(self, attr_name,
                 res_ops.query_resource(res_ops.PRIMARY_STORAGE, name_cond)[0])
# Example #44
 def get_image(self):
     """Query the image named self.image_name_net and cache it on self.image."""
     name_cond = res_ops.gen_query_conditions('name', '=', self.image_name_net)
     self.image = res_ops.query_resource(res_ops.IMAGE, name_cond)[0]
# Example #45
def test():
    """Set up a bidirectional IPsec connection (IKE DH group 5) between two
    mevoco environments, verify connectivity both ways, then verify the
    tunnel stops working after deletion.

    Side effects: mutates os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] to
    switch the test framework between management nodes; restores mevoco1 at
    exit.
    """
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']

    # --- mevoco1: VM, public VIP, private CIDR ---
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vip1 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    first_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # --- mevoco2: VM, public VIP, private CIDR ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # --- create both ends of the tunnel with ike_dh_group=5 ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create ipsec in mevoco1')
    ipsec1 = ipsec_ops.create_ipsec_connection('ipsec1',
                                               pri_l3_uuid1,
                                               vip2.get_vip().ip,
                                               '123456',
                                               vip1.get_vip().uuid,
                                               [second_zstack_cidrs],
                                               ike_dh_group=5)

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2',
                                               pri_l3_uuid2,
                                               vip1.get_vip().ip,
                                               '123456',
                                               vip2.get_vip().uuid,
                                               [first_zstack_cidrs],
                                               ike_dh_group=5)

    # --- positive checks, both directions ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' %
            (mevoco1_ip, mevoco2_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        # Fixed message: this check is vm2 -> vm1 (args were already
        # (mevoco2_ip, mevoco1_ip) but the text named the wrong direction).
        test_util.test_fail(
            'vm in mevoco2[MN:%s] could not connect to vm in mevoco1[MN:%s]' %
            (mevoco2_ip, mevoco1_ip))

    # --- delete mevoco1 side; both directions must stop working ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)

    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted'
            % (mevoco1_ip, mevoco2_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted'
            % (mevoco2_ip, mevoco1_ip))

    # --- cleanup on both sides, restore env var ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_lib.lib_error_cleanup(test_obj_dict1)
    vip1.delete()
    test_obj_dict1.rm_vip(vip1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict2)
    vip2.delete()
    test_obj_dict2.rm_vip(vip2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create Ipsec Success')
def test():
    """Local-storage scenario: create a VM installed from an ISO, delete and
    expunge the ISO while the VM keeps running, detach the ISO from the VM,
    then migrate the VM and clean everything up.

    Skips unless local storage is deployed and there is more than one host
    (migration needs a target host).
    """
    global image
    global test_obj_dict
    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    #run condition
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip(
            "skip for host_num is not satisfy condition host_num>1")

    # NOTE(review): `bss` is queried but never used below; the ISO is added to
    # the first backup storage queried separately further down.
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                        bs_cond,
                                        None,
                                        fields=['uuid'])

    #create disk offering
    data_volume_size = 10737418240
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('root-disk-iso')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)

    #create instance offering
    cpuNum = 2
    memorySize = 1024 * 1024 * 1024
    name = 'iso-vm-offering'
    new_offering_option = test_util.InstanceOfferingOption()
    new_offering_option.set_cpuNum(cpuNum)
    new_offering_option.set_memorySize(memorySize)
    new_offering_option.set_name(name)
    new_offering = vm_ops.create_instance_offering(new_offering_option)
    test_obj_dict.add_instance_offering(new_offering)

    #add iso
    img_option = test_util.ImageOption()
    img_option.set_name('iso1')
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [],
                                            None)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_url(os.environ.get('isoForVmUrl'))
    image_inv = img_ops.add_iso_template(img_option)
    image_uuid = image_inv.uuid
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    #create vm by iso
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    root_disk_uuid = data_volume_offering.uuid
    vm = test_stub.create_vm_with_iso([l3_net_uuid], image_uuid, 'iso-vm',
                                      root_disk_uuid, new_offering.uuid)
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_obj_dict.add_vm(vm)

    #check vm
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip

    # Wait (up to 30 min) for the ISO installation to finish and sshd to come
    # up inside the guest; earlier agent-based check kept commented for reference.
    #cmd ='[ -e /root ]'
    #ssh_timeout = test_lib.SSH_TIMEOUT
    #test_lib.SSH_TIMEOUT = 3600
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
    #if not test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host_ip, vm_ip, 'root', 'password', cmd):
    #    test_lib.SSH_TIMEOUT = ssh_timeout
    #    test_util.test_fail("iso has been failed to installed.")

    #test_lib.SSH_TIMEOUT = ssh_timeout

    #delete iso
    image.delete()
    test_obj_dict.rm_image(image)

    #expunge iso
    image.expunge()

    #detach iso
    img_ops.detach_iso(vm.vm.uuid)

    # Migrate the VM after the source ISO is gone — the key assertion of this
    # test.
    #vm ops test
    test_stub.vm_ops_test(vm, "VM_TEST_MIGRATE")

    vm.destroy()
    vol_ops.delete_disk_offering(root_disk_uuid)
    vm_ops.delete_instance_offering(new_offering.uuid)
    test_obj_dict.rm_vm(vm)
    test_obj_dict.rm_disk_offering(data_volume_offering)
    test_obj_dict.rm_instance_offering(new_offering)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create VM Image in Image Store Success')
def test():
    """MN-VM HA test on a multi-network scenario: shut down the network of the
    host running the management-node VM (keeping its management network up)
    and verify the MN VM is restarted on exactly one other host, then verify
    the whole environment recovers.
    """
    global vm
    global mn_host
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))

    # Switch the framework to the MN's management IP while the public network
    # is being disturbed.
    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    test_util.test_logger(
        "shutdown host's network [%s] that mn vm is running on" %
        (mn_host[0].ip_))
    # downMagt=False: only the non-management networks go down.
    test_stub.shutdown_host_network(mn_host[0],
                                    test_lib.all_scenario_config,
                                    downMagt=False)
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on another host"
    )
    time.sleep(20)

    # The consul leader must have moved to a different host than the one we
    # took down.
    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[
            0].ip_ or new_mn_host_ip == mn_host[0].managementIp_:
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host[0].ip_))

    # Poll up to ~600s (120 * 5s) until the MN VM runs on exactly one host.
    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 120s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    #node_ops.wait_for_management_server_start()
    test_stub.wrapper_of_wait_for_management_server_start(600)

    # The downed host is excluded: its non-management networks are still off.
    test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
# Example #48
def test():
    """HA test on local+NFS storage: take a host's network down, wait for the
    VM on it to go Unknown, bring the network back and reconnect the host,
    then verify the VM returns to Running within the 300s budget.

    Requires local storage AND NFS primary storage both deployed, and VM HA
    globally enabled.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    must_ps_list = [inventory.LOCAL_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Remove all virtual routers so they don't constrain VM placement.
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    # Pick an enabled, connected host that is not the management node so the
    # network outage doesn't hit the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_none_status')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    # The VM must not share a host with a VR, the MN VM, or the NFS server.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))

    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)

    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_none_status')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)

    # Phase 1: wait (<=300s) for the VM to be reported Unknown; as soon as it
    # is, restore the host network and reconnect the host so recovery can run.
    for i in range(0, 300):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)

    # NOTE(review): vm_stop_time is always assigned by the loop above (range
    # is non-empty), so this None-guard can never fire as written.
    if vm_stop_time is None:
        vm_stop_time = 300

    # Phase 2: spend the remaining budget waiting for Running; the for/else
    # fails the test only if the loop never breaks.
    for i in range(vm_stop_time, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within 300s.")

    vm.destroy()

    test_util.test_pass('Test VM none change to Stopped within 300s Success')
def test():
    """Local-storage image-cache cleanup test: create an image, boot a VM from
    it, cold-migrate the VM's root volume to another host, delete/expunge the
    image, run imagecache cleanup on the primary storage, and verify the
    cached template file is gone from the original host.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])

    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)

    new_image.add_root_volume_template()

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # The test only makes sense on local storage; skip otherwise (the VM is
    # already created at this point and is left to the suite's cleanup).
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    test_obj_dict.add_vm(vm)
    vm.check()
    # Remember the source host before migrating; the cache file is checked
    # there after cleanup.
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()

    new_image.delete()
    new_image.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                          new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
#    elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
#    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
#    elif ps.type == 'SharedMountPoint':

    vm.destroy()
    test_util.test_pass('Migrate VM Test Success')
def test():
    """End-to-end admin smoke test of the IAM2 access-control model.

    Walks through the full lifecycle of every IAM2 entity visible here:
    roles (policy statements), projects (attributes, templates),
    organizations (Company/Department hierarchy), virtual-id groups and
    virtual ids.  Each numbered section below creates, exercises and
    verifies one entity type; section 13 tears everything down.

    Uses module-level globals so the suite's error-cleanup hook can
    delete whatever was created before a mid-test failure.
    """
    global role_uuid, project_uuid, project_02_uuid, project_template_01_uuid, project_template_02_uuid, \
        company_uuid_01, company_uuid_02, department_01_uuid, department_02_uuid, virtual_id_group_uuid, \
        virtual_id_uuid

    # Start from a clean slate; also called again at the end.
    iam2_ops.clean_iam2_enviroment()

    # 1 create role and add/remove policy
    statements = [{"effect": "Allow", "actions": ["org.zstack.header.vm.**"]}]
    role_uuid = iam2_ops.create_role('test_role', statements).uuid
    action = "org.zstack.header.image.**"
    statements = [{"effect": "Allow", "actions": [action]}]
    iam2_ops.add_policy_statements_to_role(role_uuid, statements)
    statement_uuid = iam2_ops.get_policy_statement_uuid_of_role(
        role_uuid, action)
    # statement_uuid= res_ops.get_resource(res_ops.ROLE, uuid=role_uuid)[0].statements[0].uuid
    iam2_ops.remove_policy_statements_from_role(role_uuid, [statement_uuid])

    # 2 create project and  add/remove attributes to/from it
    project_name = 'test_project'
    project_uuid = iam2_ops.create_iam2_project(project_name).uuid

    # __ProjectRelatedZone__ restricts a project to one zone; only testable
    # when at least two zones exist (we verify the other zone is invisible).
    zone_inv = res_ops.query_resource(res_ops.ZONE)
    if len(zone_inv) >= 2:
        attributes = [{
            "name": "__ProjectRelatedZone__",
            "value": zone_inv[0].uuid
        }]
        iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes)
        username = '******'
        # NOTE(review): looks like a pre-hashed password value expected by the
        # API — confirm against iam2_ops.create_iam2_virtual_id.
        password = \
            'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
        virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username,
                                                          password).uuid
        iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid],
                                                 project_uuid)
        # Log in as the virtual id, then switch into the project session.
        session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
        session_uuid = iam2_ops.login_iam2_project(project_name,
                                                   session_uuid).uuid
        # Hosts of the non-related zone must NOT be visible in this session.
        cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_inv[1].uuid)
        host_inv = res_ops.query_resource(res_ops.HOST,
                                          cond,
                                          session_uuid=session_uuid)
        if host_inv:
            test_util.test_fail("test Project Related Zone fail")
        attribute_uuid = iam2_ops.get_attribute_uuid_of_project(
            project_uuid, "__ProjectRelatedZone__")
        iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
        iam2_ops.remove_attributes_from_iam2_project(project_uuid,
                                                     [attribute_uuid])

    # 3 create project template from project
    project_template_01_uuid = iam2_ops.create_iam2_project_template_from_project(
        'project_template', project_uuid, 'this is a template '
        'description').uuid
    project_template_inv = res_ops.get_resource(res_ops.IAM2_PROJECT_TEMPLATE,
                                                uuid=project_template_01_uuid)
    if not project_template_inv:
        test_util.test_fail("create template from project fail")

    # 4 create project template and then create project from template
    project_template_02_uuid = iam2_ops.create_iam2_project_template(
        'project_template_02').uuid
    project_02_uuid = iam2_ops.create_iam2_project_from_template(
        'project_02', project_template_02_uuid).uuid
    project_inv = res_ops.get_resource(res_ops.IAM2_PROJECT,
                                       uuid=project_02_uuid)
    if not project_inv:
        test_util.test_fail("create project from template fail")

    # 5 create Company and Department (organization)
    company_uuid_01 = iam2_ops.create_iam2_organization(
        'test_company_01', 'Company').uuid
    company_uuid_02 = iam2_ops.create_iam2_organization(
        'test_company_02', 'Company').uuid
    department_01_uuid = iam2_ops.create_iam2_organization(
        'test_department_01', 'Department', parent_uuid=company_uuid_01).uuid
    department_02_uuid = iam2_ops.create_iam2_organization(
        'test_department_02', 'Department').uuid

    # 6 organization change parent: re-home both departments under
    # company_02, then verify the parentUuid of each.
    iam2_ops.change_iam2_organization_parent(company_uuid_02,
                                             [department_02_uuid])
    iam2_ops.change_iam2_organization_parent(company_uuid_02,
                                             [department_01_uuid])
    department_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION,
                                          uuid=department_01_uuid)[0]
    if department_inv.parentUuid != company_uuid_02:
        test_util.test_fail('change organization parent fail')
    department_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION,
                                          uuid=department_02_uuid)[0]
    if department_inv.parentUuid != company_uuid_02:
        test_util.test_fail('change organization parent fail')

    # 7 create virtual id group and add/remove role and attributes to/from it
    virtual_id_group_uuid = iam2_ops.create_iam2_virtual_id_group(
        project_uuid, 'test_virtual_id_group').uuid
    iam2_ops.add_roles_to_iam2_virtual_id_group([role_uuid],
                                                virtual_id_group_uuid)
    iam2_ops.remove_roles_from_iam2_virtual_idgroup([role_uuid],
                                                    virtual_id_group_uuid)
    # TODO:there is nothing to do with the below api in the first version of iam2
    # iam2_ops.add_attributes_to_iam2_virtual_id_group()
    # iam2_ops.remove_attributes_from_iam2_virtual_id_group()

    # 8 create virtual id and add/remove role or attributes to/from it
    password = '******'
    virtual_id_uuid = iam2_ops.create_iam2_virtual_id('username',
                                                      password).uuid
    iam2_ops.add_roles_to_iam2_virtual_id([role_uuid], virtual_id_uuid)
    iam2_ops.remove_roles_from_iam2_virtual_id([role_uuid], virtual_id_uuid)

    # Round-trip the __PlatformAdmin__ and __ProjectAdmin__ attributes:
    # add, look up the generated attribute uuid, then remove.
    cond = res_ops.gen_query_conditions('virtualIDUuid', '=', virtual_id_uuid)
    attributes = [{"name": "__PlatformAdmin__"}]
    iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes)
    cond_01 = res_ops.gen_query_conditions('name', '=', "__PlatformAdmin__",
                                           cond)
    attribute_uuid = res_ops.query_resource_fields(
        res_ops.IAM2_VIRTUAL_ID_ATTRIBUTE, cond_01)[0].uuid
    iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid,
                                                    [attribute_uuid])
    attributes = [{"name": "__ProjectAdmin__", "value": project_uuid}]
    iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes)
    cond_02 = res_ops.gen_query_conditions('name', '=', "__ProjectAdmin__",
                                           cond)
    attribute_uuid = res_ops.query_resource_fields(
        res_ops.IAM2_VIRTUAL_ID_ATTRIBUTE, cond_02)[0].uuid
    iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid,
                                                    [attribute_uuid])

    # admin can't create Project operator
    # attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
    # iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes)
    # iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid, attributes)

    # 9 add virtual id to organization and set it as OrganizationSupervisor
    iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid],
                                                  department_01_uuid)

    attributes = [{
        "name": "__OrganizationSupervisor__",
        "value": virtual_id_uuid
    }]
    iam2_ops.add_attributes_to_iam2_organization(department_01_uuid,
                                                 attributes)
    cond_03 = res_ops.gen_query_conditions('name', '=',
                                           "__OrganizationSupervisor__")
    cond_03 = res_ops.gen_query_conditions('value', '=', virtual_id_uuid,
                                           cond_03)
    attribute_uuid = res_ops.query_resource(
        res_ops.IAM2_ORGANIZATION_ATTRIBUTE, cond_03)[0].uuid
    iam2_ops.remove_attributes_from_iam2_organization(department_01_uuid,
                                                      [attribute_uuid])

    iam2_ops.remove_iam2_virtual_ids_from_organization([virtual_id_uuid],
                                                       department_01_uuid)

    # 10 add virtual id to group and project
    iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid)
    iam2_ops.add_iam2_virtual_ids_to_group([virtual_id_uuid],
                                           virtual_id_group_uuid)
    iam2_ops.remove_iam2_virtual_ids_from_group([virtual_id_uuid],
                                                virtual_id_group_uuid)
    iam2_ops.remove_iam2_virtual_ids_from_project([virtual_id_uuid],
                                                  project_uuid)

    # 11 change state: disable each entity, verify it reads back Disabled,
    # then re-enable it.
    disable = 'disable'
    enable = 'enable'
    Disabled = 'Disabled'
    iam2_ops.change_iam2_organization_state(company_uuid_01, disable)
    res_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION,
                                   uuid=company_uuid_01)[0]
    if res_inv.state != Disabled:
        test_util.test_fail("test change iam2 organization state fail")
    iam2_ops.change_iam2_organization_state(company_uuid_01, enable)
    iam2_ops.change_iam2_organization_state(department_01_uuid, disable)
    iam2_ops.change_iam2_organization_state(department_01_uuid, enable)

    iam2_ops.change_iam2_project_state(project_uuid, disable)
    res_inv = res_ops.get_resource(res_ops.IAM2_PROJECT, uuid=project_uuid)[0]
    if res_inv.state != Disabled:
        test_util.test_fail("test change iam2 project state fail")
    iam2_ops.change_iam2_project_state(project_uuid, enable)

    iam2_ops.change_iam2_virtual_id_state(virtual_id_uuid, disable)
    res_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID,
                                   uuid=virtual_id_uuid)[0]
    if res_inv.state != Disabled:
        test_util.test_fail("test change iam2 virtual id state fail")
    iam2_ops.change_iam2_virtual_id_state(virtual_id_uuid, enable)

    iam2_ops.change_iam2_virtual_id_group_state(virtual_id_group_uuid, disable)
    res_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP,
                                   uuid=virtual_id_group_uuid)[0]
    if res_inv.state != Disabled:
        test_util.test_fail("test change iam2 virtual id group state fail")
    iam2_ops.change_iam2_virtual_id_group_state(virtual_id_group_uuid, enable)

    iam2_ops.change_role_state(role_uuid, disable)
    res_inv = res_ops.get_resource(res_ops.ROLE, uuid=role_uuid)[0]
    if res_inv.state != Disabled:
        test_util.test_fail("test change iam2 role state fail")
    iam2_ops.change_role_state(role_uuid, enable)

    # 12 update: rename/re-password each entity and verify via query and,
    # for the virtual id, via login with old (must fail) and new credentials.
    virtual_id_new_name = 'virtual_id_new_name'
    virtual_id_new_des = 'virtual_id_new_des'
    virtual_id_new_password = '******'

    iam2_ops.update_iam2_virtual_id(virtual_id_uuid, virtual_id_new_name,
                                    virtual_id_new_des,
                                    virtual_id_new_password)
    virtual_id_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID,
                                          uuid=virtual_id_uuid)[0]
    if virtual_id_inv.name != virtual_id_new_name:
        test_util.test_fail("update iam2 virtual id name fail")
    try:
        iam2_ops.login_iam2_virtual_id('username', password)
    except:
        test_util.test_logger("the old username and password can't login")
    try:
        virtual_id_session_uuid = iam2_ops.login_iam2_virtual_id(
            virtual_id_new_name, virtual_id_new_password)
        acc_ops.logout(virtual_id_session_uuid)
    except:
        test_util.test_fail("update iam2 virtual id name or password fail.")

    virtual_id_group_new_name = 'virtual_id_group_new_name'
    virtual_id_group_new_des = 'virtual_id_group_new_des'
    iam2_ops.update_iam2_virtual_id_group(virtual_id_group_uuid,
                                          virtual_id_group_new_name,
                                          virtual_id_group_new_des)
    virtual_id_group_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP,
                                                uuid=virtual_id_group_uuid)[0]
    if virtual_id_group_inv.name != virtual_id_group_new_name:
        test_util.test_fail("update iam2 virtual id group name fail")

    project_new_name = 'project_new_name'
    project_new_dsc = 'project_new_dsc'
    iam2_ops.update_iam2_project(project_uuid, project_new_name,
                                 project_new_dsc)
    project_inv = res_ops.get_resource(res_ops.IAM2_PROJECT,
                                       uuid=project_uuid)[0]
    if project_inv.name != project_new_name or project_inv.description != project_new_dsc:
        test_util.test_fail("update project information fail")

    company_new_name = 'company_new_name'
    company_new_dsc = 'company_new_dsc'
    iam2_ops.update_iam2_organization(company_uuid_02, company_new_name,
                                      company_new_dsc)
    organization_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION,
                                            uuid=company_uuid_02)[0]
    if organization_inv.name != company_new_name or organization_inv.description != company_new_dsc:
        test_util.test_fail("update organization name fail")

    # 13 delete everything created above (projects are deleted then expunged).
    iam2_ops.delete_iam2_organization(company_uuid_01)
    iam2_ops.delete_iam2_organization(company_uuid_02)
    iam2_ops.delete_iam2_organization(department_01_uuid)
    iam2_ops.delete_iam2_organization(department_02_uuid)
    iam2_ops.delete_iam2_virtual_id_group(virtual_id_group_uuid)
    iam2_ops.delete_iam2_project(project_uuid)
    iam2_ops.delete_iam2_project(project_02_uuid)
    iam2_ops.expunge_iam2_project(project_uuid)
    iam2_ops.expunge_iam2_project(project_02_uuid)
    iam2_ops.delete_iam2_project_template(project_template_01_uuid)
    iam2_ops.delete_iam2_project_template(project_template_02_uuid)
    iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
    iam2_ops.delete_role(role_uuid)

    iam2_ops.clean_iam2_enviroment()
    test_util.test_pass('success test iam2 login in by admin!')
# Example #51
# 0
def test():
    """Verify a VM stays Running after its MN host loses and regains network.

    Scenario: with VM HA enabled and shared primary storage, create a VM
    pinned to the management-node host, cut that host's network, restore
    it, restart zstack-ctl on the MN over ssh, and assert the VM is
    reported Running afterwards.  Finally re-create a VM to prove the MN
    services still work.

    Fix vs. previous revision: the failure message claimed the VM "is not
    stopped as expected" although the check asserts the VM *is Running*;
    the message now matches the check.  Dead commented-out VR bookkeeping
    and the unused host_port lookup were removed.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    # Shared-storage types only: HA restart is meaningless for a VM whose
    # disks live on the failed host's local storage.
    allow_ps_list = [
        inventory.CEPH_PRIMARY_STORAGE_TYPE,
        inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint'
    ]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Remove all virtual routers so none is collateral damage on the host
    # whose network we are about to cut.
    test_lib.clean_up_all_vr()
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    # Pin the VM to the Enabled/Connected host that is also the MN host.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm_status_runnning')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()

    test_stub.ensure_host_has_no_vr(host_uuid)

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    # Simulate a transient network outage on the VM's host.
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    test_util.test_logger("wait for 30 seconds")
    time.sleep(30)
    test_stub.up_host_network(host_ip, test_lib.all_scenario_config)

    # Give HA / host reconnection time to settle.
    time.sleep(120)

    # Restart the management node service over ssh; nohup keeps it alive
    # after the ssh session closes.
    cmd = "nohup zstack-ctl start &"
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    if not test_lib.lib_execute_ssh_cmd(
            mn_ip, host_username, host_password, cmd, timeout=300):
        test_util.test_fail("CMD:%s execute failed on %s" % (cmd, mn_ip))

    time.sleep(120)
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    if res_ops.query_resource(res_ops.VM_INSTANCE,
                              cond)[0].state != "Running":
        test_util.test_fail("vm is not running as expected.")

    vm.destroy()

    # Re-create a VM to confirm MN services work normally after recovery.
    time.sleep(20)
    vm.create()
    vm.check()
    vm.destroy()

    test_util.test_pass(
        'Test vm checking status after network disconnect and connect success')
# Example #52
# 0
def _fan_out(target, args_list, op_name):
    """Run ``target`` once per argument tuple, each call on its own thread.

    Resets the module-level ``exec_info`` failure collector before spawning,
    waits for every worker to finish, then delegates to ``check_exception``
    to fail the test if any worker recorded an error for ``op_name``.
    """
    global exec_info
    exec_info = []
    workers = [threading.Thread(target=target, args=args)
               for args in args_list]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    check_exception(op_name)


def test():
    """Concurrently create, destroy and expunge VMs on Ceph primary storage.

    The delete policy is set to 'Delay' so that destroyed VMs linger and can
    be expunged explicitly; each phase runs one thread per VM and fails the
    test if any thread raised (previously the identical fan-out/join/check
    boilerplate was written out three times — now factored into _fan_out).
    """
    global vms, exec_info, delete_policy1

    # This scenario targets expunge behaviour on Ceph; skip on any other PS.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    if ps.type != inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip(
            'this test is for moniter expunge vm on ceph, not found ceph, skip test.'
        )

    delete_policy1 = test_lib.lib_set_delete_policy('vm', 'Delay')
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid

    # Dedicated small instance offering: 1 CPU, 256 MiB.
    cpuNum = 1
    memorySize = 268435456
    name = 'vm-offering-allocator-strategy'
    new_offering_option = test_util.InstanceOfferingOption()
    new_offering_option.set_cpuNum(cpuNum)
    new_offering_option.set_memorySize(memorySize)
    new_offering_option.set_name(name)
    new_offering = vm_ops.create_instance_offering(new_offering_option)
    test_obj_dict.add_instance_offering(new_offering)

    instance_offering_uuid = new_offering.uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)

    # Phase 1: create all VMs in parallel (workers append to global `vms`).
    _fan_out(create_vm_wrapper,
             [(i, vm_creation_option) for i in range(vm_num)], "created")

    # Phase 2: destroy them in parallel (Delay policy keeps them around).
    _fan_out(destroy_vm_wrapper,
             [(i, vm.vm.uuid) for i, vm in zip(range(vm_num), vms)],
             "destroyed")

    # Phase 3: expunge the destroyed VMs in parallel.
    _fan_out(expunge_vm_wrapper,
             [(i, vm.vm.uuid) for i, vm in zip(range(vm_num), vms)],
             "expunged")

    test_lib.lib_set_delete_policy('vm', delete_policy1)
    test_util.test_pass('Create VM Test Success')
# Example #53
# 0
def test():
    """Primary-storage maintain-mode test with an attached ISO.

    Flow: create a VM, register a fake ISO image, put the VM's primary
    storage into 'maintain' (which is expected to stop the VM), attach the
    ISO to the stopped VM, re-enable the PS, reconnect the host, restart
    the virtual routers, and finally start/destroy the VM to verify it
    survived the maintain cycle.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Drop a fake ISO into the MN's tomcat static dir so it can be added
    # over HTTP below; content is irrelevant to the test.
    os.system(
        "echo fake iso for test only >  %s/apache-tomcat/webapps/zstack/static/test.iso"
        % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' %
                       (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    # Entering maintain mode is expected to stop every VM on the PS; wait
    # for the guest's ssh port to go down as evidence it stopped.
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)

    # Recover the environment: re-enable the PS, reconnect the host and
    # restart all virtual routers (they were stopped by maintain mode).
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)

    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def test():
    """Clone a VM, delete/expunge an unrelated image, and verify the clone
    still supports migrate and volume-attach operations.

    Requires an ImageStore or Ceph backup storage and at least two hosts
    (VM_TEST_MIGRATE needs a migration target).

    Fix vs. previous revision: the backup-storage query restricted the
    returned fields to ['uuid'] but the code below reads bss[0].type to
    decide whether to export the image — 'type' is now fetched as well.
    """
    global image1
    global test_obj_dict

    # Run condition: BS type must support the export/clone semantics below.
    allow_bs_list = [
        inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE,
        inventory.CEPH_BACKUP_STORAGE_TYPE
    ]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)

    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip(
            "skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    # 'type' must be in the field list: it is read below for the export check.
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                        bs_cond,
                                        None,
                                        fields=['uuid', 'type'])

    # Register a root-volume template that will later be deleted/expunged
    # while the cloned VM keeps running.
    image_name1 = 'image1_a'
    image_option = test_util.ImageOption()
    image_option.set_format('qcow2')
    image_option.set_name(image_name1)
    #image_option.set_system_tags('qemuga')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    image_option.set_timeout(3600 * 1000)

    image1 = zstack_image_header.ZstackTestImage()
    image1.set_creation_option(image_option)
    image1.add_root_volume_template()
    image1.check()

    # Export is only supported on the image-store BS type.
    if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
        image1.export()

    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm('test-vm', image_name, l3_name)
    test_obj_dict.add_vm(vm)

    # Clone the VM; all subsequent ops are exercised on the clone.
    cloned_vm_name = ['cloned_vm']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)

    # Delete the template, then verify the clone can still migrate.
    image1.delete()
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_MIGRATE")

    # Expunge the template, then verify the clone can still attach volumes.
    image1.expunge()
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_ATTACH")

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
def Create():
    """Stress-create many VMs in parallel and verify the final count.

    The number of VMs comes from ZSTACK_TEST_NUM (default 700); creation
    threads are throttled by the module-level `thread_threshold`.  Session
    timeout and max-concurrency limits are raised so the admin session
    survives the whole run, then restored afterwards.

    NOTE(review): `session_uuid` is a module global — the two
    change_global_config calls below use its pre-existing value *before*
    login_as_admin() replaces it; presumably an earlier login populated
    it.  Verify against the suite's setup code.
    """
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 700
    else:
        vm_num = int(vm_num)

    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)

    # Remember the requested count; vm_num itself is decremented below.
    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3PublicNetworkName')

    l3 = test_lib.lib_get_l3_by_name(l3_name)
    l3s = test_lib.lib_get_l3s()
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    #change account session timeout.
    session_to = con_ops.change_global_config('identity', 'session.timeout',
                                              '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity',
                                              'session.maxConcurrent', '10000',
                                              session_uuid)

    session_uuid = acc_ops.login_as_admin()

    vm_creation_option.set_session_uuid(session_uuid)

    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)

    # Spawn one creation thread per VM, never exceeding thread_threshold
    # live threads at once.
    while vm_num > 0:
        check_thread_exception()
        vm_creation_option.set_l3_uuids([l3.uuid])
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm, ))
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()

    # Wait until only the main thread remains.
    while threading.active_count() > 1:
        time.sleep(0.01)

    # All VMs share the same (random) name, so a count query by name tells
    # how many were actually created.
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # Restore the session config values saved above.
    con_ops.change_global_config('identity', 'session.timeout', session_to,
                                 session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent',
                                 session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        print 'Create %d VMs Test Success.' % (org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' %
                            (org_num, vms))
# Example #56
# 0
def test():
    """Storage over-provision test.

    Raises the storage provision rate to 2.5x, computes a per-volume size
    so that `target_volume_num` volumes exactly exhaust the (virtually
    enlarged) capacity, creates and attaches them all, then verifies one
    more volume cannot be attached.  Also regression-checks that toggling
    the rate back and forth leaves reported capacity unchanged.

    Fix vs. previous revision: the final pass message said "Memory Over
    Provision" although this test provisions storage (cf. test_dsc above).
    """
    global original_rate
    test_util.test_dsc('Test storage over provision method')
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE,
                                         cond,
                                         limit=1)
    if not ps:
        test_util.test_skip(
            'No Enabled/Connected primary storage was found, skip test.')
        return True

    host = host[0]
    ps = ps[0]
    ps_type = ps.type
    #TODO: Fix ceph testing
    if ps_type == 'Ceph' or ps_type == 'SharedMountPoint':
        test_util.test_skip('skip test for ceph and smp.')

    over_provision_rate = 2.5
    target_volume_num = 12
    # Capacity head-room (10 MiB) left unprovisioned to avoid flakiness.
    kept_disk_size = 10 * 1024 * 1024

    vm = test_stub.create_vm(vm_name='storage_over_prs_vm_1',
                             host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    vm.check()

    avail_cap = get_storage_capacity(ps_type, host.uuid, ps.uuid)
    if avail_cap < kept_disk_size:
        test_util.test_skip(
            'available disk capacity:%d is too small, skip test.' % avail_cap)
        return True

    original_rate = test_lib.lib_set_provision_storage_rate(
        over_provision_rate)
    # Size volumes so target_volume_num of them consume the whole
    # over-provisioned capacity (minus the kept head-room).
    data_volume_size = int(over_provision_rate * (avail_cap - kept_disk_size) /
                           target_volume_num)

    # Regression check: toggling the rate back must report the same
    # available capacity (historical bug when only 1 VM existed).
    test_lib.lib_set_provision_storage_rate(original_rate)
    avail_cap_tmp = get_storage_capacity(ps_type, host.uuid, ps.uuid)
    if avail_cap != avail_cap_tmp:
        test_util.test_fail(
            'disk size is not same, between 2 times provision. Before change over rate, 1st cap: %d; 2nd cap: %d'
            % (avail_cap, avail_cap_tmp))

    test_lib.lib_set_provision_storage_rate(over_provision_rate)
    test_util.test_logger(
        'Will create a serial of volume. Each of them will have %d size.' %
        data_volume_size)
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('storage-over-ps-test')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)

    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(data_volume_offering.uuid)

    # Create and attach exactly target_volume_num volumes; each must succeed.
    times = 1
    while (times <= target_volume_num):
        try:
            volume_creation_option.set_name('volume-%d' % times)
            volume = test_stub.create_volume(volume_creation_option)
            test_obj_dict.add_volume(volume)
            test_util.test_logger(
                'Current available storage size: %d' %
                get_storage_capacity(ps_type, host.uuid, ps.uuid))
            volume.attach(vm)
        except Exception as e:
            test_util.test_logger(
                "Unexpected volume Creation Failure in storage over provision test. "
            )
            raise e

        times += 1

    time.sleep(2)
    # After filling up, less than one volume's worth of space must remain.
    avail_cap2 = get_storage_capacity(ps_type, host.uuid, ps.uuid)
    if avail_cap2 > data_volume_size:
        test_util.test_fail(
            'Available disk size: %d is still bigger than offering disk size: %d , after creating %d volumes.'
            % (avail_cap2, data_volume_size, target_volume_num))

    # One more volume must NOT be attachable.
    try:
        volume_creation_option.set_name('volume-%d' % (times + 1))
        volume = test_stub.create_volume(volume_creation_option)
        test_obj_dict.add_volume(volume)
        volume.attach(vm)
    except:
        test_util.test_logger(
            "Expected Volume Creation Failure in storage over provision test. "
        )
    else:
        test_util.test_fail(
            "The %dth Volume is still attachable, which is wrong" %
            (target_volume_num + 1))

    test_lib.lib_set_provision_storage_rate(original_rate)
    test_lib.lib_robot_cleanup(test_obj_dict)

    test_util.test_pass('Storage Over Provision Test Pass')
def test():
    """End-to-end lifecycle test for ZStack SNS resources.

    Exercises, in order:
      1. create and validate an email platform
      2. create an email endpoint and an http endpoint
      3. create a topic; subscribe endpoints to it and to the built-in
         'system-alarm' and 'api' topics
      4. subscribe a VMStateChangedOnHost event
      5. update endpoint/platform name+description; disable then re-enable
         topics/endpoints/platform, checking state after each change
      6. unsubscribe and delete everything, verifying DB removal each step

    Fails the test run via test_util.test_fail on the first mismatch.
    """

    global email_platform_uuid, email_endpoint_uuid, http_endpoint_uuid, sns_topic_uuid

    # create platform
    smtp_server = os.environ.get('smtpServer')
    smtp_port = os.environ.get('smtpPort')
    email_platform_name = 'Alarm_email'
    email_username = os.environ.get('mailUsername')
    email_password = os.environ.get('mailPassword')
    email_platform = zwt_ops.create_sns_email_platform(smtp_server, smtp_port,
                                                       email_platform_name,
                                                       email_username,
                                                       email_password)
    email_platform_uuid = email_platform.uuid
    cond = res_ops.gen_query_conditions('uuid', '=', email_platform_uuid)
    inv = res_ops.query_resource(res_ops.SNS_EMAIL_PLATFORM, cond)
    if not inv:
        test_util.test_fail('create sns email platform failed')
    try:
        zwt_ops.validate_sns_email_platform(email_platform_uuid)
    except:
        test_util.test_fail(
            'Validate SNS Email Platform Failed, Email Plarform: %s' %
            email_platform_uuid)

    # create endpoint
    email_receiver = os.environ.get('mailUsername')
    # NOTE(review): the endpoint *name* is read from 'mailPassword' — this
    # looks like a copy/paste slip; confirm the intended env var before
    # changing it (the name is only a label, so behavior is kept as-is).
    email_endpoint_name = os.environ.get('mailPassword')
    email_endpoint_uuid = zwt_ops.create_sns_email_endpoint(
        email_receiver, email_endpoint_name, email_platform_uuid).uuid
    cond = res_ops.gen_query_conditions('uuid', '=', email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_EMAIL_ENDPOINT, cond)
    if not inv:
        test_util.test_fail('create sns email endpoint failed')
    http_endpoint_name = 'http'
    url = 'http://localhost:8080/webhook-url'
    http_username = '******'
    http_password = '******'
    http_endpoint = zwt_ops.create_sns_http_endpoint(url, http_endpoint_name,
                                                     http_username,
                                                     http_password)
    http_endpoint_uuid = http_endpoint.uuid
    cond = res_ops.gen_query_conditions('uuid', '=', http_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_HTTP_ENDPOINT, cond)
    if not inv:
        test_util.test_fail('create sns http endpoint failed')

    # create sns topic and query system-in topic
    sns_topic_uuid = zwt_ops.create_sns_topic('sns_topic_01').uuid
    zwt_ops.subscribe_sns_topic(sns_topic_uuid, email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('endpoints.uuid', '=',
                                        email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)
    if not inv:
        test_util.test_fail('create and subscribe snstopic failed')
    cond = res_ops.gen_query_conditions('name', '=', 'system-alarm')
    system_alarm_topic = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    system_alarm_topic_uuid = system_alarm_topic.uuid
    zwt_ops.subscribe_sns_topic(system_alarm_topic_uuid, email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('endpoints.uuid', '=',
                                        email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)
    if not inv:
        test_util.test_fail('subscribe system-alarm topic failed')
    cond = res_ops.gen_query_conditions('name', '=', 'api')
    api_topic = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    api_topic_uuid = api_topic.uuid
    zwt_ops.subscribe_sns_topic(api_topic_uuid, http_endpoint_uuid)
    # BUG FIX: the endpointUuid condition was previously discarded because
    # the second gen_query_conditions call did not receive the first 'cond';
    # chain them as done in the unsubscribe checks below.
    cond = res_ops.gen_query_conditions('endpointUuid', '=',
                                        http_endpoint_uuid)
    cond = res_ops.gen_query_conditions('topicUuid', '=', api_topic_uuid,
                                        cond)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC_SUBSCRIBER, cond)
    if not inv:
        test_util.test_fail('subscribe api topic failed')

    # subscribe event
    namespace = 'ZStack/VM'
    actions = [{"actionUuid": sns_topic_uuid, "actionType": "sns"}]
    labels = [{"key": "NewState", "op": "Equal", "value": "Disconnected"}]
    event_name = 'VMStateChangedOnHost'
    event_sub_uuid = zwt_ops.subscribe_event(namespace, event_name, actions,
                                             labels).uuid
    cond = res_ops.gen_query_conditions('uuid', '=', event_sub_uuid)
    event_subscription = res_ops.query_resource(res_ops.EVENT_SUBSCRIPTION,
                                                cond)
    if not event_subscription:
        test_util.test_fail('Subscribe event failed')

    # update endpoint
    new_name = 'endpointNewName'
    new_description = 'endpoint new description'
    zwt_ops.update_sns_application_endpoint(email_endpoint_uuid, new_name,
                                            new_description)
    cond = res_ops.gen_query_conditions('uuid', '=', email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)[0]
    if inv.name != new_name or inv.description != new_description:
        test_util.test_fail('test update email endpoint failed')
    zwt_ops.update_sns_application_endpoint(http_endpoint_uuid, new_name,
                                            new_description)
    cond = res_ops.gen_query_conditions('uuid', '=', http_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)[0]
    if inv.name != new_name or inv.description != new_description:
        test_util.test_fail('test update http endpoint failed')
    new_name_platform = 'platformNewName'
    new_description_platform = 'platformNewName'
    zwt_ops.update_sns_application_platform(email_platform_uuid,
                                            new_name_platform,
                                            new_description_platform)
    cond = res_ops.gen_query_conditions('uuid', '=', email_platform_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_PLATFORM, cond)[0]
    if inv.name != new_name_platform or inv.description != new_description_platform:
        test_util.test_fail('test update email platform failed')

    # change state: disable every resource, then verify each reads Disabled
    state_event = 'disable'
    state_result = 'Disabled'
    zwt_ops.change_sns_topic_state(system_alarm_topic_uuid, state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', system_alarm_topic_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change system alarm topic state failed')
    zwt_ops.change_sns_topic_state(api_topic_uuid, state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', api_topic_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change api topic state failed')
    zwt_ops.change_sns_application_endpoint_state(email_endpoint_uuid,
                                                  state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change email endpoint state failed')
    zwt_ops.change_sns_application_endpoint_state(http_endpoint_uuid,
                                                  state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', http_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change http endpoint state failed')
    zwt_ops.change_sns_application_platform_state(email_platform_uuid,
                                                  state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', email_platform_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_PLATFORM, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change email platform state failed')

    # test recover and delete: re-enable topics, then tear everything down
    state_event = 'enable'
    state_result = 'Enabled'
    zwt_ops.change_sns_topic_state(system_alarm_topic_uuid, state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', system_alarm_topic_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change system alarm topic state failed')
    zwt_ops.change_sns_topic_state(api_topic_uuid, state_event)
    cond = res_ops.gen_query_conditions('uuid', '=', api_topic_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)[0]
    if inv.state != state_result:
        test_util.test_fail('change api topic state failed')
    zwt_ops.unsubscribe_event(event_sub_uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', event_sub_uuid)
    event_subscription = res_ops.query_resource(res_ops.EVENT_SUBSCRIPTION,
                                                cond)
    if event_subscription:
        test_util.test_fail('unsubscribe event failed')
    zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('endpointUuid', '=',
                                        email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('topicUuid', '=', sns_topic_uuid, cond)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC_SUBSCRIBER, cond)
    if inv:
        test_util.test_fail('unsubscribe sns topic failed')
    zwt_ops.unsubscribe_sns_topic(system_alarm_topic_uuid, email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('endpointUuid', '=',
                                        email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('topicUuid', '=',
                                        system_alarm_topic_uuid, cond)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC_SUBSCRIBER, cond)
    if inv:
        test_util.test_fail('unsubscribe system alarm topic failed')
    zwt_ops.unsubscribe_sns_topic(api_topic_uuid, http_endpoint_uuid)
    cond = res_ops.gen_query_conditions('endpointUuid', '=',
                                        http_endpoint_uuid)
    cond = res_ops.gen_query_conditions('topicUuid', '=', api_topic_uuid, cond)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC_SUBSCRIBER, cond)
    if inv:
        test_util.test_fail('unsubscribe api topic failed')
    zwt_ops.delete_sns_topic(sns_topic_uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', sns_topic_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TOPIC, cond)
    if inv:
        test_util.test_fail('delete sns topic failed')
    zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', http_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)
    if inv:
        test_util.test_fail('delete http endpoint failed')
    zwt_ops.delete_sns_application_endpoint(email_endpoint_uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', email_endpoint_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_ENDPOINT, cond)
    if inv:
        test_util.test_fail('delete email endpoint failed')
    zwt_ops.delete_sns_application_platform(email_platform_uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', email_platform_uuid)
    inv = res_ops.query_resource(res_ops.SNS_APPLICATION_PLATFORM, cond)
    if inv:
        test_util.test_fail('delete email platform failed')

    test_util.test_pass('success test event with email endpoint basic option!')
def test():
    """Multiple network services (PF, LB, IPsec) sharing one SNAT VIP.

    Runs against two management nodes (mevoco1/mevoco2). On mevoco1 the
    VR's SNAT public IP is reused as a VIP carrying port forwarding, a load
    balancer, and an IPsec connection to mevoco2. The test verifies VM
    connectivity through the tunnel, then deletes each service and asserts
    the VIP's useFor flags are cleared accordingly.
    """
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    # ZSTACK_BUILT_IN_HTTP_SERVER_IP is flipped between the two MN IPs
    # below; presumably it selects which deployment subsequent API calls
    # target — confirm against the test framework's client setup.
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    vm_nic1 = vm1.get_vm().vmNics[0]
    vm_nic1_uuid = vm_nic1.uuid
    vm3 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm3)
    vm3.check()
    vm_nic3 = vm3.get_vm().vmNics[0]
    vm_nic3_uuid = vm_nic3.uuid
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
    # Reuse the VR's SNAT public IP as the single VIP all services share.
    vip1 = test_stub.get_snat_ip_as_vip(vr1_pub_ip)
    vip_uuid = vip1.get_vip().uuid
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    first_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    # Peer side: create the remote VM and a dedicated VIP on mevoco2.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create PF in mevoco1')
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    vr = test_stub.create_vr_vm(test_obj_dict1, l3_name)
    # NOTE(review): 'vr' is immediately reassigned below; the first
    # create_vr_vm call appears to be needed only for its side effect —
    # confirm before simplifying.
    l3_name = os.environ.get('l3VlanNetworkName4')
    vr = test_stub.create_vr_vm(test_obj_dict1, l3_name)
    vr_pub_ip = test_lib.lib_find_vr_pub_ip(vr)
    pf_creation_opt1 = PfRule.generate_pf_rule_option(
        vr_pub_ip,
        protocol=inventory.TCP,
        vip_target_rule=Port.rule4_ports,
        private_target_rule=Port.rule4_ports,
        vip_uuid=vip_uuid)
    test_pf1 = zstack_pf_header.ZstackTestPortForwarding()
    test_pf1.set_creation_option(pf_creation_opt1)
    test_pf1.create()
    vip1.attach_pf(test_pf1)
    vip1.check()
    test_pf1.attach(vm_nic1_uuid, vm1)
    vip1.check()

    test_util.test_dsc('Create LB in mevoco1')
    lb = zstack_lb_header.ZstackTestLoadBalancer()
    lb.create('create lb test', vip1.get_vip().uuid)
    test_obj_dict1.add_load_balancer(lb)
    vip1.attach_lb(lb)
    lb_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=222,
                                                                lbi_port=22)
    lbl = lb.create_listener(lb_creation_option)
    lbl.add_nics([vm_nic1_uuid, vm_nic3_uuid])
    lb.check()
    vip1.check()

    test_util.test_dsc('Create ipsec in mevoco1')
    ipsec1 = ipsec_ops.create_ipsec_connection('ipsec1', pri_l3_uuid1,
                                               vip2.get_vip().ip, '123456',
                                               vip1.get_vip().uuid,
                                               [second_zstack_cidrs])

    # The VIP must now carry the IPsec flag exactly once.
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "IPsec" in vip1_db.useFor
    assert vip1_db.useFor.count("IPsec") == 1

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2,
                                               vip1.get_vip().ip, '123456',
                                               vip2.get_vip().uuid,
                                               [first_zstack_cidrs])

    # Tunnel up: the two private-network VMs must reach each other.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' %
            (mevoco1_ip, mevoco2_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' %
            (mevoco2_ip, mevoco1_ip))

    # delete ipsec
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)

    # Tunnel down: pings must now fail in both directions.
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted'
            % (mevoco1_ip, mevoco2_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted'
            % (mevoco2_ip, mevoco1_ip))

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip

    # Each service deletion must clear its flag from the VIP's useFor list.
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "IPsec" not in vip1_db.useFor
    # delete PF
    test_pf1.delete()
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "PortForwarding" not in vip1_db.useFor

    # delete LB
    lb.delete()
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "LoadBalancer" not in vip1_db.useFor

    test_lib.lib_error_cleanup(test_obj_dict1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    vip2.delete()
    test_lib.lib_error_cleanup(test_obj_dict2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create multiple service with 1 snat IP Success')
def test():
    """VM HA ('NeverStop') across a simulated host network outage.

    Requires NFS primary storage and HA enabled. Creates a VM on a host
    that runs neither the MN, a VR, nor the NFS server, takes that host's
    bridge NIC down for 180s over SSH, then verifies the VM was restarted
    on a different host before recovering and reconnecting the original.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    must_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    # Pick an Enabled+Connected host that is not the MN host.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    # The VM must not share a host with a VR, the MN, or the NFS server,
    # or the NIC-down step below would break more than the VM under test.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid,
                                      vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    #    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    #l2interface = test_lib.lib_get_l2s_by_vm(vm.get_vm())[0].physicalInterface
    l2_network_interface = test_stub.get_host_l2_nic_name("br_eth0")
    # Simulate an outage: NIC down for 180s, then back up, via SSH.
    cmd = "ifconfig %s down && sleep 180 && ifconfig %s up" % (
        l2_network_interface, l2_network_interface)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    rsp = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password,
                                       cmd, 240)
    if not rsp:
        test_util.test_logger(
            "host is expected to shutdown after its network down for a while")

    #test_util.test_logger("wait for 600 seconds")
    test_util.test_logger("wait for 180 seconds")
    time.sleep(180)
    vm.update()
    # HA must have restarted the VM on a different host by now.
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()

    # Recover the original host with the external script, then reconnect.
    cmd = 'PORT=%s bash -ex %s %s' % (
        host_port, os.environ.get('hostRecoverScript'), host_ip)
    test_util.test_logger(cmd)
    os.system(cmd)
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
# Example #60
# 0
def test():
    """Verify backup-storage capacity accounting across an image add/delete.

    Builds a 300MB raw file, registers it as a root-volume template on the
    first backup storage (delete policy forced to 'Direct'), and checks that
    total capacity stays constant while available capacity drops; after the
    image is deleted, available capacity must return to within ~1MB of its
    starting value.
    """
    global delete_policy
    delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')

    # Fabricate the raw file to upload, then cycle the sftp BS connection.
    os.system('dd if=/dev/zero of=%s bs=1M count=300' % test_image)
    time.sleep(1)
    img_ops.reconnect_sftp_backup_storage(
        res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid)
    time.sleep(1)

    opt = test_util.ImageOption()
    opt.set_name('test-image-%s' % time.time())
    opt.set_description(
        'test image which is upload from local filesystem.')
    opt.set_url('file://%s' % test_image)

    # Snapshot capacities immediately before the add.
    bs_before = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    avail_before = bs_before.availableCapacity
    total_before = bs_before.totalCapacity

    opt.set_backup_storage_uuid_list([bs_before.uuid])
    opt.set_format('raw')
    opt.set_mediaType('RootVolumeTemplate')
    image_inv = img_ops.add_root_volume_template(opt)
    time.sleep(10)
    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(opt)
    image.set_image(image_inv)
    test_obj_dict.add_image(image)

    bs_after_add = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    avail_after_add = bs_after_add.availableCapacity
    total_after_add = bs_after_add.totalCapacity

    # Adding an image must not change total capacity...
    if total_before != total_after_add:
        test_util.test_fail(
            'Backup storage total capacity is not same, after adding new image: %s. The previous value: %s, the current value: %s'
            % (image_inv.uuid, total_before, total_after_add))

    # ...but must reduce available capacity.
    if avail_before <= avail_after_add:
        test_util.test_fail(
            'Backup storage available capacity is not correct, after adding new image: %s. The previous value: %s, the current value: %s'
            % (image_inv.uuid, avail_before, avail_after_add))

    image.delete()
    bs_after_del = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    avail_after_del = bs_after_del.availableCapacity
    total_after_del = bs_after_del.totalCapacity

    if total_before != total_after_del:
        test_util.test_fail(
            'Backup storage total capacity is not same, after deleting new image: %s. The previous value: %s, the current value: %s'
            % (image_inv.uuid, total_before, total_after_del))

    # Allow up to ~1MB of slack for rounding after the full round trip.
    if avail_before > (avail_after_del + 1024000) or avail_before < avail_after_del:
        test_util.test_fail(
            'Backup storage available capacity is not correct, after adding and deleting new image: %s. The previous value: %s, the current value: %s'
            % (image_inv.uuid, avail_before, avail_after_del))

    os.system('rm -f %s' % test_image)
    test_lib.lib_set_delete_policy('image', delete_policy)
    test_util.test_pass(
        'Test backup storage capacity for adding/deleting image pass.')