Code Example #1
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS changes to disable state')

    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def env_recover():
    global ps_uuid
    if ps_uuid != None:
        try:
            ps_ops.change_primary_storage_state(ps_uuid, 'enable')
        except:
            pass
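Note: every fragment in this collection leans on module-level imports and globals (test_obj_dict, ps_uuid, host_uuid, vr_uuid, disabled_ps_list, new_ps_list, and the test_lib / test_stub / *_ops helpers) that are defined outside the snippets. A minimal sketch of that scaffolding, assuming the usual zstackwoodpecker suite layout (the exact import paths are an assumption, not taken from these fragments):

# Sketch of the module-level scaffolding the fragments assume; import paths
# follow common zstackwoodpecker conventions and may differ in a real suite.
import os
import time
import random

import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.resource_operations as res_ops

test_obj_dict = test_state.TestStateDict()  # tracks created VMs/volumes for cleanup
ps_uuid = None        # primary storage whose state the test changes
host_uuid = None      # host to reconnect in error_cleanup()/env_recover()
vr_uuid = None        # virtual router to reconnect in error_cleanup()
disabled_ps_list = [] # PS left disabled, re-enabled by env_recover()
new_ps_list = []      # PS created by the test, detached and deleted afterwards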
Code Example #3
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    os.system("echo fake iso for test only >  %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)


    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)


    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)  

    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
Code Example #4
def test():
    ps_env = test_stub.PSEnvChecker()

    nfs_ps = ps_env.get_random_nfs()

    test_util.test_dsc("Create 1 vm  with {} data volume".format(VOLUME_NUMBER))
    vm = test_stub.create_multi_vms(name_prefix='test-', count=1, data_volume_number=VOLUME_NUMBER)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_dsc("disable NFS PS")
    ps_ops.change_primary_storage_state(nfs_ps.uuid, state='disable')
    disabled_ps_list.append(nfs_ps)

    test_util.test_dsc("make sure VM till OK and running")
    vm.update()
    vm.check()
    assert vm.get_vm().state == inventory.RUNNING

    with test_stub.expected_failure("Create datavol in nfs-local env when nfs disabled", Exception):
        test_stub.create_multi_volumes(count=1, ps=nfs_ps)

    test_util.test_dsc("Try to create vm")
    new_vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1)[0]
    test_obj_dict.add_vm(new_vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #5
def env_recover():
    local_ps, shared_ps = test_stub.PSEnvChecker().get_two_ps()
    if local_ps.state == 'Disabled':
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if shared_ps.state == 'Disabled':
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #6
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to maintain state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)  

    vm.start()
    vm.check()
    vm.destroy()
    test_util.test_pass('PS maintain mode Test Success')
Code Example #7
def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    for disabled_ps in disabled_ps_list:
        ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
    if new_ps_list:
        for new_ps in new_ps_list:
            ps_ops.detach_primary_storage(new_ps.uuid, new_ps.attachedClusterUuids[0])
            ps_ops.delete_primary_storage(new_ps.uuid)
Code Example #8
def test():
    ps_env = test_stub.PSEnvChecker()
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    first_ps_volume_list = env.first_ps_volume_list
    second_ps_vm_list = env.second_ps_vm_list
    second_ps_volume_list = env.second_ps_volume_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)
    tbj_list = first_ps_vm_list + second_ps_vm_list + first_ps_volume_list + second_ps_volume_list

    test_util.test_dsc('Disable All Primary Storage')
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.change_primary_storage_state(ps.uuid, state='disable')
        disabled_ps_list.append(ps)

    test_util.test_dsc('make sure all VM and Volumes still OK and running')
    for test_object in tbj_list:
        test_object.check()

    test_util.test_dsc("Try to Create one vm")
    with test_stub.expected_failure("Create vm when no ps in enable status", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1)

    test_util.test_dsc("Try to Create one volume")
    with test_stub.expected_failure("Create volume when no ps in enable status", Exception):
        test_stub.create_multi_volumes(count=1, ps=random.choice([env.first_ps, env.second_ps]))

    test_util.test_dsc("enable All primaryStorage")
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.change_primary_storage_state(ps.uuid, state='enable')
        disabled_ps_list.remove(ps)

    test_util.test_dsc("Try to create vm in both PrimaryStorage")
    if ps_env.is_sb_ceph_env:
        vm1 = test_stub.create_multi_vms(name_prefix='test-vm_first_ps', count=1, ps_uuid=env.first_ps.uuid, bs_type='ImageStoreBackupStorage')[0]
        vm2 = test_stub.create_multi_vms(name_prefix='test-vm_second_ps', count=1, ps_uuid=env.second_ps.uuid, bs_type='Ceph')[0]
    else:
        vm1 = test_stub.create_multi_vms(name_prefix='test-vm_first_ps', count=1, ps_uuid=env.first_ps.uuid)[0]
        vm2 = test_stub.create_multi_vms(name_prefix='test-vm_second_ps', count=1, ps_uuid=env.second_ps.uuid)[0]
    test_obj_dict.add_vm(vm1)
    test_obj_dict.add_vm(vm2)


    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #9
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.delete()
    volume.check()

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to maintain state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    volume.recover()
    volume.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)  

    vm.start()
    vm.check()

    volume.delete()
    #volume.expunge()
    volume.check()

    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS maintain mode Test Success')
Code Example #10
def test():
    global vm
    global test_host
    global ps_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout


    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()


    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to maintain state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
	
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')

    for i in range(0, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE, conditions)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within 300s.")

    vm.destroy()

    test_util.test_pass('Test checking VM ha and none status when force stop vm Success.')
Code Example #11
def env_recover():
    local_ps, shared_ps = test_stub.PSEnvChecker().get_two_ps()
    if local_ps.state in ('Disabled', "Maintenance"):
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if shared_ps.state in ('Disabled', "Maintenance"):
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')
    for vr in res_ops.get_resource(res_ops.APPLIANCE_VM):
        if vr.state != inventory.RUNNING:
            vm_ops.start_vm(vr.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #12
def error_cleanup():
    global ps_uuid
    if ps_uuid != None:
        ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    global host_uuid
    if host_uuid != None:
        host_ops.reconnect_host(host_uuid)
    global vr_uuid
    if vr_uuid != None:
        vm_ops.reconnect_vr(vr_uuid)
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #13
def error_cleanup():
    global ps_uuid
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    if ps_uuid != None:
        ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    global host_uuid
    if host_uuid != None:
        host_ops.reconnect_host(host_uuid)
    global vr_uuid
    if vr_uuid != None:
        vm_ops.reconnect_vr(vr_uuid)
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #14
def error_cleanup():
    global ps_uuid
    if ps_uuid != None:
        ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    global host_uuid
    if host_uuid != None:
        host_ops.reconnect_host(host_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)
    #global vr_uuid
    #if vr_uuid != None:
    #    vm_ops.reconnect_vr(vr_uuid)
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #15
def error_cleanup():
    global ps_uuid
    if ps_uuid != None:
        ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    global host_uuid
    if host_uuid != None:
        host_ops.reconnect_host(host_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)  
    #global vr_uuid
    #if vr_uuid != None:
    #    vm_ops.reconnect_vr(vr_uuid)
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #16
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS changes to disable state')

    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.delete()
    volume.check()
    volume.expunge()
    volume.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
Code Example #17
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')

    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())

    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid)
    snapshots.set_utility_vm(vm)
    vm.check()
    snapshots.create_snapshot('create_root_snapshot1')
    snapshots.check()
    snapshot1 = snapshots.get_current_snapshot()

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to be running when PS changes to disable state')

    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.stop()
    vm.check()

    test_util.test_dsc('Use snapshot, volume and check')
    snapshots.use_snapshot(snapshot1)
    snapshots.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_util.test_pass('PS disable mode Test Success')
Code Example #18
def error_cleanup():
    global ps_uuid

    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')

    if ps_uuid != None:
        ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    global host_uuid
    if host_uuid != None:
        host_ops.reconnect_host(host_uuid)
    global vr_uuid
    if vr_uuid != None:
        vm_ops.reconnect_vr(vr_uuid)
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #19
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS changes to disable state')

    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.detach(vm.get_vm().uuid)

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)

    volume.delete()
    #volume.expunge()
    volume.check()
    vm.destroy()

    test_util.test_pass('Delete volume under PS disable mode Test Success')
Code Example #20
def test():
    ps_env = test_stub.PSEnvChecker()

    local_ps = ps_env.get_random_local()
    nfs_ps = ps_env.get_random_nfs()

    test_util.test_dsc("Create {0} vm ".format(VM_COUNT))
    vm = test_stub.create_multi_vms(name_prefix='test-', count=VM_COUNT)[0]
    vm.check()
    test_obj_dict.add_vm(vm)

    test_util.test_dsc("Create {0} volumes in NFS".format(VOLUME_NUMBER))
    volume_in_nfs = test_stub.create_multi_volumes(count=VOLUME_NUMBER,
                                                   ps=nfs_ps)
    for volume in volume_in_nfs:
        test_obj_dict.add_volume(volume)
        volume.check()

    test_util.test_dsc("Attach all volumes to VM")
    for volume in volume_in_nfs:
        volume.attach(vm)
        volume.check()

    test_util.test_dsc("disable local PS")
    ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
    disabled_ps_list.append(local_ps)

    test_util.test_dsc("make sure all VM and Volumes still OK and running")
    vm.check()
    for volume in volume_in_nfs:
        volume.check()

    test_util.test_dsc("Try to create vm with datavolume")
    with test_stub.expected_failure(
            'Create vm with datavol in nfs-local env when local disabled',
            Exception):
        test_stub.create_multi_vms(name_prefix='test-vm',
                                   count=1,
                                   datavolume=10)

    test_util.test_dsc("Try to create datavolume in NFS")
    volume = test_stub.create_multi_volumes(count=1, ps=nfs_ps)[0]
    test_obj_dict.add_volume(volume)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #21
def test():
    global vm
    ova_image_name = 'centos-dhcp'
    network_pattern1 = os.environ['vcenterDefaultNetwork']
    cpuNum = 2
    memorySize = 2*1024*1024*1024

    cond = res_ops.gen_query_conditions('type', '!=', 'Vcenter')
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    for i in ps:
        if (i.type == 'Ceph') or (i.type == 'Sharedblock'):
            break
    else:
        test_util.test_skip('Skip test on non ceph or sharedblock PS')
    ps_uuid = i.uuid  # use the Ceph/Sharedblock PS found above, not necessarily ps[0]
    cond = res_ops.gen_query_conditions('primaryStorage.uuid', '=', ps_uuid)
    cluster_uuid = res_ops.query_resource(res_ops.CLUSTER, cond)[0].uuid
    cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster_uuid)    
    host = res_ops.query_resource(res_ops.HOST, cond)[0]  

    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum, memorySize = memorySize)
    vm = test_stub.create_vm_in_vcenter(vm_name = 'v2v-test', image_name = ova_image_name, l3_name = network_pattern1, instance_offering_uuid = new_offering.uuid)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    v2v_conversion_host = test_stub.add_v2v_conversion_host('v2v_host', host.uuid, '/tmp/zstack', 'VMWARE')
    url = 'vmware://%s' % vm.vm.uuid
    migrate_task = test_stub.convert_vm_from_foreign_hypervisor('test', url, cpuNum, memorySize, ps_uuid, [l3_uuid], cluster_uuid, v2v_conversion_host.uuid)
    time.sleep(10)
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    ps_ops.change_primary_storage_state(ps_uuid, state='disable')

    rerunLongJob(migrate_task.uuid, ps_uuid, 1, 0) 
    #cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("Rerun v2v long job test passed.")
Code Example #22
def test():
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    first_ps_volume_list = env.first_ps_volume_list
    second_ps_vm_list = env.second_ps_vm_list
    second_ps_volume_list = env.second_ps_volume_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)
    tbj_list = first_ps_vm_list + second_ps_vm_list + first_ps_volume_list + second_ps_volume_list

    for _ in xrange(LOOP):
        test_util.test_dsc('Disable random one Primary Storage')
        disabled_ps = random.choice([env.first_ps, env.second_ps])
        if disabled_ps is env.first_ps:
            enabled_ps = env.second_ps
        else:
            enabled_ps = env.first_ps
        ps_ops.change_primary_storage_state(disabled_ps.uuid, state='disable')
        disabled_ps_list.append(disabled_ps)

        test_util.test_dsc('make sure all VM and Volumes still OK and running')
        for test_object in tbj_list:
            test_object.check()

        test_util.test_dsc("Try to Create vm in disabeld ps")
        with test_stub.expected_failure("Create vm in disabled ps", Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=disabled_ps.uuid)

        test_util.test_dsc("Create 2 vms and check all should be in enabled PS")
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=2)
        for vm in vm_list:
            test_obj_dict.add_vm(vm)
        for vm in vm_list:
            assert vm.get_vm().allVolumes[0].primaryStorageUuid == enabled_ps.uuid

        ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
        disabled_ps_list.pop()
    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #23
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')

    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')

    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to maintain state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    vm.destroy()
    #vm.expunge() maintain mode is not support expunge ops.
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)  

    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')

    test_util.test_pass('PS maintain mode Test Success')
Code Example #24
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid

    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vr_vm('vm1', 'imageName_net', 'l3VlanNetwork3')
    test_obj_dict.add_vm(vm)

    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    for bs in backup_storage_list:
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        vm.destroy()
        test_util.test_skip('Did not find ceph type backup storage.')

    l3_1_name = os.environ.get('l3VlanNetwork3')
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS changes to disable state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
Code Example #25
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid

    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vr_vm('vm1', 'imageName_net', 'l3VlanNetwork3')
    test_obj_dict.add_vm(vm)

    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    for bs in backup_storage_list:
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        vm.destroy()
        test_util.test_skip('Did not find ceph type backup storage.')

    l3_1_name = os.environ.get('l3VlanNetwork3')
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS changes to disable state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
Code Example #26
def rerunLongJob(job_uuid, ps_uuid, max_retry_times=1, executed_times=0):
    if executed_times != 0:
        ps_ops.change_primary_storage_state(ps_uuid, state='enable')
        rerun_job = longjob_ops.rerun_longjob(job_uuid)

    for i in range(30):
        cond = res_ops.gen_query_conditions('uuid', '=', job_uuid)
        long_job = res_ops.query_resource(res_ops.LONGJOB, cond)[0]
        if long_job.state == 'Failed':
            executed_times += 1
            if executed_times > max_retry_times:
                test_util.test_fail("Long job failed to rerun")
            else:
                # re-enable the PS and rerun the long job once, then stop polling this attempt
                rerunLongJob(job_uuid, ps_uuid, max_retry_times, executed_times)
                return
#        elif long_job.state == 'Running':
#            time.sleep(60)
        elif long_job.state == 'Succeeded':
            test_util.test_fail('v2v long job did not fail after disabling ps.')
        else:
            time.sleep(60)
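For reference, Code Example #21 above invokes this helper right after disabling the primary storage that the v2v long job depends on; a condensed view of that call site (same variables as in Code Example #21):

# Condensed call site from Code Example #21: disable the PS used by the
# migration long job, then let rerunLongJob() confirm the failure and retry
# once with the PS re-enabled.
ps_ops.change_primary_storage_state(ps_uuid, state='disable')
rerunLongJob(migrate_task.uuid, ps_uuid, max_retry_times=1, executed_times=0)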
Code Example #27
def test():
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()

    if flavor['local_enable'] == False:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
    if flavor['shared_enable'] == False:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='disable')

    test_util.test_dsc("Try to Create VM without specified ps")
    if flavor['local_enable']:
        vm_list = test_stub.create_multi_vms(name_prefix='test-vm', count=2)
        for vm in vm_list:
            test_obj_dict.add_vm(vm)
            assert test_lib.lib_get_root_volume(
                vm.get_vm()).primaryStorageUuid == local_ps.uuid
    else:
        with test_lib.expected_failure('Create vm when no ps enabled',
                                       Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=2)

    test_util.test_dsc("Create VM with Volume without specified ps")
    if flavor['local_enable'] and flavor['shared_enable']:
        vm_list = test_stub.create_multi_vms(name_prefix='test-vm',
                                             count=2,
                                             data_volume_number=1)
        for vm in vm_list:
            test_obj_dict.add_vm(vm)
            assert test_lib.lib_get_root_volume(
                vm.get_vm()).primaryStorageUuid == local_ps.uuid

            for data_vol in [
                    volume for volume in vm.get_vm().allVolumes
                    if volume.type != 'Root'
            ]:
                assert data_vol.primaryStorageUuid == shared_ps.uuid

    else:
        with test_lib.expected_failure(
                'Create vm with volume when no ps enabled', Exception):
            test_stub.create_multi_vms(name_prefix='test-vm',
                                       count=2,
                                       data_volume_number=1)

    if flavor['local_enable'] == False:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if flavor['shared_enable'] == False:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')

    test_lib.lib_error_cleanup(test_obj_dict)
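Code Examples #27 and #30 both index an undefined-here case_flavor mapping with the CASE_FLAVOR environment variable. A plausible sketch of that table, inferred from the 'local_enable'/'shared_enable' keys the tests read (the flavor names themselves are assumptions):

# Hypothetical case_flavor table for Code Examples #27/#30; only the two keys
# the tests read are known, the flavor names are assumed for illustration.
case_flavor = {
    'local_enable_shared_enable':   dict(local_enable=True,  shared_enable=True),
    'local_disable_shared_enable':  dict(local_enable=False, shared_enable=True),
    'local_enable_shared_disable':  dict(local_enable=True,  shared_enable=False),
    'local_disable_shared_disable': dict(local_enable=False, shared_enable=False),
}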
Code Example #28
def test():
    ps_env = test_stub.PSEnvChecker()

    local_ps = ps_env.get_random_local()
    nfs_ps = ps_env.get_random_nfs()

    test_util.test_dsc("Create {0} vm ".format(VM_COUNT))
    vm = test_stub.create_multi_vms(name_prefix='test-', count=VM_COUNT)[0]
    vm.check()
    test_obj_dict.add_vm(vm)

    test_util.test_dsc("Create {0} volumes in NFS".format(VOLUME_NUMBER))
    volume_in_nfs = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=nfs_ps)
    for volume in volume_in_nfs:
        test_obj_dict.add_volume(volume)
        volume.check()

    test_util.test_dsc("Attach all volumes to VM")
    for volume in volume_in_nfs:
        volume.attach(vm)
        volume.check()

    test_util.test_dsc("disable local PS")
    ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
    disabled_ps_list.append(local_ps)

    test_util.test_dsc("make sure all VM and Volumes still OK and running")
    vm.check()
    for volume in volume_in_nfs:
        volume.check()

    test_util.test_dsc("Try to create vm with datavolume")
    with test_stub.expected_failure('Create vm with datavol in nfs-local env when local disabled', Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1, datavolume=10)

    test_util.test_dsc("Try to create datavolume in NFS")
    volume = test_stub.create_multi_volumes(count=1, ps=nfs_ps)[0]
    test_obj_dict.add_volume(volume)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #29
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')

    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')

    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS changes to disable state')

    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)

    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')

    test_util.test_pass('PS disable mode Test Success')
Code Example #30
def test():
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()

    if flavor['local_enable'] == False:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
    if flavor['shared_enable'] == False:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='disable')

    test_util.test_dsc("Try to Create VM without specified ps")
    if flavor['local_enable']:
        vm_list = test_stub.create_multi_vms(name_prefix='test-vm', count=2)
        for vm in vm_list:
            test_obj_dict.add_vm(vm)
            assert test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid == local_ps.uuid
    else:
        with test_lib.expected_failure('Create vm when no ps enabled', Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=2)

    test_util.test_dsc("Create VM with Volume without specified ps")
    if flavor['local_enable'] and flavor['shared_enable']:
        vm_list = test_stub.create_multi_vms(name_prefix='test-vm', count=2, data_volume_number=1)
        for vm in vm_list:
            test_obj_dict.add_vm(vm)
            assert test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid == local_ps.uuid

            for data_vol in [volume for volume in vm.get_vm().allVolumes if volume.type != 'Root']:
                assert data_vol.primaryStorageUuid == shared_ps.uuid

    else:
        with test_lib.expected_failure('Create vm with volume when no ps enabled', Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=2, data_volume_number=1)

    if flavor['local_enable'] == False:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if flavor['shared_enable'] == False:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')

    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #31
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid

    test_util.test_dsc('Create test vm and check')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanDNATNetworkName')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list = [l3_net_uuid]
    l3_name2 = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid2 = test_lib.lib_get_l3_by_name(l3_name2).uuid
    

    vm = test_stub.create_vm(l3_net_list, image_uuid, 'attach_nic_vm', \
            default_l3_uuid = l3_net_uuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    l3_1 = test_lib.lib_get_l3_by_name(l3_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid


    vm.add_nic(l3_net_uuid2)
    attached_nic = test_lib.lib_get_vm_last_nic(vm.get_vm())
    if l3_net_uuid2 != attached_nic.l3NetworkUuid:
        test_util.test_fail("After attach a nic, VM:%s last nic is not belong l3: %s" % (vm.get_vm().uuid, l3_net_uuid2))

    test_lib.lib_restart_vm_network(vm.get_vm())
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to be running when PS changes to disable state')
    vm.set_state(vm_header.RUNNING)

    vm.remove_nic(attached_nic.uuid)
    attached_nic = test_lib.lib_get_vm_last_nic(vm.get_vm())
    if l3_net_uuid != attached_nic.l3NetworkUuid:
        test_util.test_fail("After detached NIC, VM:%s only nic is not belong l3: %s" % (vm.get_vm().uuid, l3_net_uuid2))

    vm.add_nic(l3_net_uuid2)
    attached_nic = test_lib.lib_get_vm_last_nic(vm.get_vm())
    if l3_net_uuid2 != attached_nic.l3NetworkUuid:
        test_util.test_fail("After attach a nic, VM:%s last nic is not belong l3: %s" % (vm.get_vm().uuid, l3_net_uuid2))

    test_lib.lib_restart_vm_network(vm.get_vm())
    vm.check()

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    test_util.test_pass('PS disable mode Test Success')
Code Example #32
def test():
    test_util.test_dsc("Test Resource Stack Apis")

    cond = res_ops.gen_query_conditions('status', '=', 'Ready')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled', cond)
    cond = res_ops.gen_query_conditions('system', '=', 'false', cond)
    image_queried = res_ops.query_resource(res_ops.IMAGE, cond)

    cond = res_ops.gen_query_conditions("category", '=', "Public")
    l3_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
    if len(l3_queried) == 0:
        cond = res_ops.gen_query_conditions("category", '=', "Private")
        l3_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)

    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('type', '=', 'UserVm', cond)
    instance_offering_queried = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, cond)

    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    ps_queried = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)

    resource_stack_option = test_util.ResourceStackOption()
    resource_stack_option.set_name("Restart_Stack")
    templateContent = '''
{
    "ZStackTemplateFormatVersion": "2018-06-18",
    "Description": "Just create a flat network & VM",
    "Parameters": {
        "L3NetworkUuid":{
            "Type": "String",
            "Label": "三层网络",
            "DefaultValue": "testuuid"
        },
        "ImageUuid":{
            "Type": "String",
            "Label": "镜像"
        },
        "InstanceOfferingUuid":{
            "Type": "String",
            "Label": "计算规格"
        }
    },
    "Resources": {
        "VmInstance": {
            "Type": "ZStack::Resource::VmInstance",
            "Properties": {
                "name": "VM-STACK",
                "instanceOfferingUuid": {"Ref":"InstanceOfferingUuid"},
                "imageUuid":{"Ref":"ImageUuid"},
                "l3NetworkUuids":[{"Ref":"L3NetworkUuid"}]
            }
        }
    },
    "Outputs": {
        "VmInstance": {
            "Value": {
                "Ref": "VmInstance"
            }
        }
    }
}
'''
    #1.create resource stack
    ps_ops.change_primary_storage_state(ps_queried[0].uuid, 'disable')
    parameter = '{"ImageUuid":"%s","InstanceOfferingUuid":"%s","L3NetworkUuid":"%s"}' % (
        image_queried[0].uuid, instance_offering_queried[0].uuid,
        l3_queried[0].uuid)
    resource_stack_option.set_templateContent(templateContent)
    resource_stack_option.set_parameters(parameter)
    #preview_resource_stack = resource_stack_ops.preview_resource_stack(resource_stack_option)
    try:
        resource_stack = resource_stack_ops.create_resource_stack(
            resource_stack_option)
        test_util.test_fail(
            'This resource stack should not have been created successfully')
    except:
        pass

    #2.query resource stack
    cond = res_ops.gen_query_conditions('name', '=', 'Restart_Stack')
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK,
                                                    cond)

    cond = res_ops.gen_query_conditions('name', '=', 'VM-STACK')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)

    if len(resource_stack_queried) == 0:
        test_util.test_fail("Fail to query resource stack")
    elif resource_stack_queried[0].status == 'Created':
        test_util.test_fail('The status of resource stack should not be Created')
    else:
        if len(vm_queried) != 0:
            test_util.test_fail(
                'Vm cannot be created when resource stack is Rollbacked or Failed'
            )

    #3.restart resource stack
    ps_ops.change_primary_storage_state(ps_queried[0].uuid, 'enable')
    resource_stack_ops.restart_resource_stack(resource_stack_queried[0].uuid)

    cond = res_ops.gen_query_conditions('uuid', '=',
                                        resource_stack_queried[0].uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK,
                                                    cond)

    cond = res_ops.gen_query_conditions('name', '=', 'VM-STACK')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)

    if len(resource_stack_queried) == 0:
        test_util.test_fail("Fail to query resource stack")
    elif resource_stack_queried[0].status != 'Created':
        test_util.test_fail('The status of resource stack should be created')
    else:
        if len(vm_queried) == 0:
            test_util.test_fail(
                'Vm should be created when resource stack is Created')

    try:
        resource_stack_ops.restart_resource_stack(
            resource_stack_queried[0].uuid)
        test_util.test_fail(
            'Resource stack cannot restart when status of stack is created')
    except:
        pass

    #4.delete resource stack
    resource_stack_ops.delete_resource_stack(resource_stack_queried[0].uuid)

    cond = res_ops.gen_query_conditions('uuid', '=',
                                        resource_stack_queried[0].uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK,
                                                    cond)

    cond = res_ops.gen_query_conditions('name', '=', 'VM-STACK')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)

    if len(resource_stack_queried) != 0:
        test_util.test_fail("Fail to delete resource stack")
    elif len(vm_queried) != 0:
        test_util.test_fail(
            "Fail to delete resource when resource stack is deleted")

    test_util.test_pass('Restart Resource Stack Test Success')
Code Example #33
def test():
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()

    vm_list = []
    for root_vol_ps in [ps1, ps2]:
        for data_vol_ps in [ps1, ps2]:
            vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1,
                                            ps_uuid=root_vol_ps.uuid, data_volume_number=VOLUME_NUMBER,
                                            ps_uuid_for_data_vol=data_vol_ps.uuid)[0]
            test_obj_dict.add_vm(vm)
            vm_list.append(vm)

    vm1, vm2, vm3, vm4 = vm_list

    for vm in vm_list:
        ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    ps_ops.change_primary_storage_state(state='maintain', primary_storage_uuid=ps2.uuid)
    maintenance_ps_list.append(ps2)
    time.sleep(60)

    vr_vm_list = test_lib.lib_find_vr_by_vm(vm1.get_vm())
    vr_vm = None
    if vr_vm_list:
        vr_vm = vr_vm_list[0]
        if vr_vm.allVolumes[0].primaryStorageUuid == ps2.uuid:
            assert vr_vm.state == inventory.STOPPED
        else:
            assert vr_vm.state == inventory.RUNNING
            vm1.check()
    else:
        vm1.check()

    for vm in vm_list:
        vm.update()

    assert vm1.get_vm().state == inventory.RUNNING
    assert vm2.get_vm().state == inventory.STOPPED
    assert vm3.get_vm().state == inventory.STOPPED
    assert vm4.get_vm().state == inventory.STOPPED

    for vm in [vm2, vm3, vm4]:
        with test_stub.expected_failure("start vm in maintenance ps", Exception):
            vm.start()

    test_util.test_dsc('enable ps2')
    ps_ops.change_primary_storage_state(state='enable', primary_storage_uuid=ps2.uuid)
    maintenance_ps_list.remove(ps2)

    if vr_vm and vr_vm.state == inventory.STOPPED:
        vm_ops.start_vm(vr_vm.uuid)

    time.sleep(10)
    for vm in [vm2, vm3, vm4]:
        vm.start()

    for vm in [vm2, vm3, vm4]:
        vm.update()
        vm.check()

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #34
def env_recover():
    for maintenance_ps in maintenance_ps_list:
        ps_ops.change_primary_storage_state(maintenance_ps.uuid,
                                            state='enable')
    test_lib.lib_error_cleanup(test_obj_dict)
Code Example #35
def test():
    ps_env = test_stub.PSEnvChecker()

    ps, another_ps = ps_env.get_two_ps()

    vm1, vm2 = test_stub.create_multi_vms(name_prefix='test-',
                                          count=2,
                                          ps_uuid=ps.uuid)

    for vm in (vm1, vm2):
        test_obj_dict.add_vm(vm)

    volume_in_another = test_stub.create_multi_volumes(
        count=VOLUME_NUMBER,
        ps=another_ps,
        host_uuid=test_lib.lib_get_vm_host(vm2.get_vm()).uuid
        if another_ps.type == inventory.LOCAL_STORAGE_TYPE else None)
    for volume in volume_in_another:
        test_obj_dict.add_volume(volume)

    for volume in volume_in_another:
        volume.attach(vm2)

    test_util.test_dsc('set another ps in maintenance mode')
    ps_ops.change_primary_storage_state(state='maintain',
                                        primary_storage_uuid=another_ps.uuid)
    maintenance_ps_list.append(another_ps)

    test_stub.wait_until_vm_reach_state(60, inventory.STOPPED, vm2)
    vm1.update()
    assert vm1.get_vm().state == inventory.RUNNING

    vr_vm_list = test_lib.lib_find_vr_by_vm(vm1.get_vm())
    vr_vm = None
    if vr_vm_list:
        vr_vm = vr_vm_list[0]
        if vr_vm.allVolumes[0].primaryStorageUuid == another_ps.uuid:
            assert vr_vm.state == inventory.STOPPED
        else:
            assert vr_vm.state == inventory.RUNNING
            vm1.check()
    else:
        vm1.check()

    with test_stub.expected_failure("Start vm in maintenance ps", Exception):
        vm2.start()

    test_util.test_dsc('enable another ps')
    ps_ops.change_primary_storage_state(state='enable',
                                        primary_storage_uuid=another_ps.uuid)
    maintenance_ps_list.remove(another_ps)

    if vr_vm and vr_vm.state == inventory.STOPPED:
        vm_ops.start_vm(vr_vm.uuid)

    time.sleep(10)
    vm2.start()
    vm2.check()

    for volume in volume_in_another:
        volume.detach()
        volume.attach(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #36
def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    for disabled_ps in disabled_ps_list:
        ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
Code Example #37
def test():
    ps_env = test_stub.PSEnvChecker()
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    first_ps_volume_list = env.first_ps_volume_list
    second_ps_vm_list = env.second_ps_vm_list
    second_ps_volume_list = env.second_ps_volume_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)
    tbj_list = first_ps_vm_list + second_ps_vm_list + first_ps_volume_list + second_ps_volume_list

    test_util.test_dsc('Disable random one Primary Storage')
    disabled_ps = random.choice([env.first_ps, env.second_ps])
    if disabled_ps is env.first_ps:
        enabled_ps = env.second_ps
    else:
        enabled_ps = env.first_ps
    ps_ops.change_primary_storage_state(disabled_ps.uuid, state='disable')
    disabled_ps_list.append(disabled_ps)

    test_util.test_dsc('make sure all VM and Volumes still OK and running')
    for test_object in tbj_list:
        test_object.check()

    test_util.test_dsc("Try to Create vm in disabeld ps")
    with test_stub.expected_failure("Create vm in disabled ps", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=disabled_ps.uuid)

    test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
    if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="Ceph")
        else:
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="ImageStoreBackupStorage")
    else:
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    for vm in vm_list:
        assert vm.get_vm().allVolumes[0].primaryStorageUuid != disabled_ps.uuid

    ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
    disabled_ps_list.pop()
    test_util.test_dsc("Create 1 vms in the recovered ps")
    if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
            vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="ImageStoreBackupStorage")[0]
        else:
            vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="Ceph")[0]
    else:
        vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid)[0]
    test_obj_dict.add_vm(vm)


    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code Example #38
def test():
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()
    if flavor['local_state'] is DISABLED:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
    elif flavor['local_state'] is MAINTAIMANCE:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='maintain')

    if flavor['shared_state'] is DISABLED:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='disable')
    elif flavor['shared_state'] is MAINTAIMANCE:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='maintain')

    if flavor['local_state'] in (DISABLED, MAINTAIMANCE):
        with test_lib.expected_failure(
                'Create vm in ps in {} or {} state'.format(
                    DISABLED, MAINTAIMANCE), Exception):
            test_stub.create_multi_vms(name_prefix='test-vm',
                                       count=1,
                                       ps_uuid=local_ps.uuid)

    if flavor['local_state'] is DISABLED:
        vm1 = test_stub.create_multi_vms(name_prefix='test-vm',
                                         count=1,
                                         ps_uuid=shared_ps.uuid)[0]
        test_obj_dict.add_vm(vm1)
        vm2 = test_stub.create_multi_vms(
            name_prefix='test-vm',
            count=1,
            ps_uuid=shared_ps.uuid,
            data_volume_number=1,
            ps_uuid_for_data_vol=shared_ps.uuid)[0]
        test_obj_dict.add_vm(vm2)

    if flavor['shared_state'] in (DISABLED, MAINTAIMANCE):
        with test_lib.expected_failure(
                'Create vm in ps in {} or {} state'.format(
                    DISABLED, MAINTAIMANCE), Exception):
            test_stub.create_multi_vms(name_prefix='test-vm',
                                       count=1,
                                       ps_uuid=shared_ps.uuid)

        vm1 = test_stub.create_multi_vms(name_prefix='test-vm',
                                         count=1,
                                         ps_uuid=local_ps.uuid)[0]
        test_obj_dict.add_vm(vm1)
        vm2 = test_stub.create_multi_vms(name_prefix='test-vm',
                                         count=1,
                                         ps_uuid=local_ps.uuid,
                                         data_volume_number=1,
                                         ps_uuid_for_data_vol=local_ps.uuid)[0]
        test_obj_dict.add_vm(vm2)

    if flavor['local_state'] in (DISABLED, MAINTAIMANCE):
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if flavor['shared_state'] in (DISABLED, MAINTAIMANCE):
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')

    test_lib.lib_error_cleanup(test_obj_dict)
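
# Note: the case_flavor table and the DISABLED / MAINTAIMANCE constants used
# above are defined elsewhere in this suite and are not shown here. A minimal
# sketch of how such a table might be laid out (the flavor names and constant
# values below are hypothetical, not the suite's actual definitions); because
# the tests compare flavors with "is", the dict must store the very same
# constant objects:
ENABLED = 'Enabled'
DISABLED = 'Disabled'
MAINTAIMANCE = 'Maintenance'

case_flavor = {
    'local_ps_disabled':  dict(local_state=DISABLED,     shared_state=ENABLED),
    'local_ps_maintain':  dict(local_state=MAINTAIMANCE, shared_state=ENABLED),
    'shared_ps_disabled': dict(local_state=ENABLED,      shared_state=DISABLED),
    'shared_ps_maintain': dict(local_state=ENABLED,      shared_state=MAINTAIMANCE),
}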
Code example #39
def test():
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()

    vm_list=list(test_stub.generate_local_shared_test_vms(test_obj_dict, vm_ha=flavor['vm_ha']))
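    # The helper yields eight VMs, one for every root/data volume placement combination across the local and shared PS.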
    (vm_root_local, vm_root_local_data_local,
     vm_root_local_data_shared, vm_root_local_data_mixed,
     vm_root_shared, vm_root_shared_data_local,
     vm_root_shared_data_shared, vm_root_shared_data_mixed) = vm_list

    if flavor['local_state'] is DISABLED:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
        time.sleep(10)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['shared_state'] is DISABLED:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='disable')
        time.sleep(10)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['reconnect']:
        for ps in (local_ps, shared_ps):
            ps_ops.reconnect_primary_storage(ps.uuid)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['local_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='maintain')
        maintain_ps = local_ps
    if flavor['shared_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='maintain')
        maintain_ps = shared_ps
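    # Give the maintenance transition time to stop the affected VMs before their states are checked.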
    time.sleep(60)

    if MAINTENANCE in (flavor['local_state'], flavor['shared_state']):
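        # The virtual router is only expected to stop if its root volume sits on the PS that entered maintenance.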
        vr_vm_list = test_lib.lib_find_vr_by_vm(vm_list[0].get_vm())
        vr_vm = None
        if vr_vm_list:
            vr_vm = vr_vm_list[0]
            if vr_vm.allVolumes[0].primaryStorageUuid == maintain_ps.uuid:
                assert vr_vm.state == inventory.STOPPED
            else:
                assert vr_vm.state == inventory.RUNNING

        for vm in vm_list:
            vm.update()

    if flavor['local_state'] is MAINTENANCE:
        for vm in (vm_root_local, vm_root_local_data_local,vm_root_local_data_shared, vm_root_local_data_mixed,
                   vm_root_shared_data_mixed,vm_root_shared_data_local):
            assert vm.get_vm().state == inventory.STOPPED
            with test_stub.expected_failure("start vm in maintenance ps", Exception):
                vm.start()

        for vm in (vm_root_shared, vm_root_shared_data_shared):
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['shared_state'] is MAINTENANCE:
        for vm in (vm_root_shared, vm_root_shared_data_shared,vm_root_shared_data_local, vm_root_shared_data_mixed,
                   vm_root_local_data_mixed,vm_root_local_data_shared):
            assert vm.get_vm().state == inventory.STOPPED
            with test_stub.expected_failure("start vm in maintenance ps", Exception):
                vm.start()
        for vm in (vm_root_local, vm_root_local_data_local):
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['local_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if flavor['shared_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')

#    if MAINTENANCE in (flavor['local_state'], flavor['shared_state']):
#        if vr_vm and vr_vm.state == inventory.STOPPED:
#            vm_ops.start_vm(vr_vm.uuid)

    for vm in vm_list:
        vm.update()
        if vm.get_vm().state == inventory.STOPPED and vm.get_vm().type != 'ApplianceVm':
            vm.start()
        vm.check()
Code example #40
def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    test_stub.remove_all_vpc_vrouter()
    for ps in ps_list:
        if ps.state == 'maintain':
            ps_ops.change_primary_storage_state(ps.uuid, "enable")
Code example #41
def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    os.system(
        "echo fake iso for test only >  %s/apache-tomcat/webapps/zstack/static/test.iso"
        % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' %
                       (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)

    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
Code example #42
def test():

    test_util.test_dsc("create vpc vrouter")

    vr = test_stub.create_vpc_vrouter()

    test_util.test_dsc("attach vpc l3 to vpc vrouter")
    test_stub.attach_l3_to_vpc_vr(vr, test_stub.L3_SYSTEM_NAME_LIST)

    test_util.test_dsc("Create one neverstop vm in random L3")
    vm = test_stub.create_vm_with_random_offering(vm_name='vpc_vm1', l3_name=random.choice(test_stub.L3_SYSTEM_NAME_LIST))
    test_obj_dict.add_vm(vm)
    vm.check()

    ps_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
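    # Putting every primary storage into maintenance should stop both the VPC vrouter and the test VM.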
    for ps in ps_list:
        ps_uuid = ps.uuid
        ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    
    time.sleep(10)
    cond = res_ops.gen_query_conditions('uuid', '=', vr.inv.uuid)
    vr = res_ops.query_resource(res_ops.VM_INSTANCE,cond)[0]
    vm.update()
    assert vr.state == 'Stopped'
    assert vm.vm.state == 'Stopped'
 
    for ps in ps_list:
        ps_ops.change_primary_storage_state(ps.uuid, 'enable')

    test_stub.ensure_hosts_connected(120)
    test_stub.ensure_pss_connected()
    vm.start()
    vr = res_ops.query_resource(res_ops.VM_INSTANCE,cond)[0]
    assert vr.state == 'Running'

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    for ps in ps_list:
        ps_ops.change_primary_storage_state(ps.uuid, 'maintain')
 
    time.sleep(20)
    vm.update()
    vr = res_ops.query_resource(res_ops.VM_INSTANCE,cond)[0]
    assert vr.state == 'Stopped'
    assert vm.vm.state == 'Stopped'

    for ps in ps_list:
        ps_ops.change_primary_storage_state(ps.uuid, 'enable')

    test_stub.ensure_pss_connected()
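    # With NeverStop HA set, the VM should come back to Running on its own once the PS is enabled again; poll for up to five minutes.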
    
    for i in range(5):
        vm.update()
        print vm.vm.state
        if vm.vm.state == 'Running':
            break
        else:
            time.sleep(60)
    assert vm.vm.state == 'Running' 
    vr = res_ops.query_resource(res_ops.VM_INSTANCE,cond)[0]
    assert vr.state == 'Running'
    
    
    test_lib.lib_error_cleanup(test_obj_dict)
    test_stub.remove_all_vpc_vrouter()
Code example #43
def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    for disabled_ps in disabled_ps_list:
        ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
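
# Note: disabled_ps_list, maintenance_ps_list, new_ps_list and test_obj_dict are
# module-level bookkeeping that each test populates before changing PS state, so
# env_recover can undo whatever was left behind. A minimal sketch of the globals
# these env_recover snippets assume (names mirror the ones used above; the
# suite-specific initialization of test_obj_dict is omitted here):
disabled_ps_list = []
maintenance_ps_list = []
new_ps_list = []
ps_list = []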
Code example #44
def test():
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()

    vm_list = list(
        test_stub.generate_local_shared_test_vms(test_obj_dict,
                                                 vm_ha=flavor['vm_ha']))
    (vm_root_local, vm_root_local_data_local, vm_root_local_data_shared,
     vm_root_local_data_mixed, vm_root_shared, vm_root_shared_data_local,
     vm_root_shared_data_shared, vm_root_shared_data_mixed) = vm_list

    if flavor['local_state'] is DISABLED:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='disable')
        time.sleep(10)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['shared_state'] is DISABLED:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='disable')
        time.sleep(10)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['reconnect']:
        for ps in (local_ps, shared_ps):
            ps_ops.reconnect_primary_storage(ps.uuid)
        for vm in vm_list:
            vm.update()
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['local_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='maintain')
        maintain_ps = local_ps
    if flavor['shared_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='maintain')
        maintain_ps = shared_ps
    time.sleep(30)

    if MAINTENANCE in (flavor['local_state'], flavor['shared_state']):
        vr_vm_list = test_lib.lib_find_vr_by_vm(vm_list[0].get_vm())
        vr_vm = None
        if vr_vm_list:
            vr_vm = vr_vm_list[0]
            if vr_vm.allVolumes[0].primaryStorageUuid == maintain_ps.uuid:
                assert vr_vm.state == inventory.STOPPED
            else:
                assert vr_vm.state == inventory.RUNNING

        for vm in vm_list:
            vm.update()

    if flavor['local_state'] is MAINTENANCE:
        for vm in (vm_root_local, vm_root_local_data_local,
                   vm_root_local_data_shared, vm_root_local_data_mixed,
                   vm_root_shared_data_mixed, vm_root_shared_data_local):
            assert vm.get_vm().state == inventory.STOPPED
            with test_stub.expected_failure("start vm in maintenance ps",
                                            Exception):
                vm.start()

        for vm in (vm_root_shared, vm_root_shared_data_shared):
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['shared_state'] is MAINTENANCE:
        for vm in (vm_root_shared, vm_root_shared_data_shared,
                   vm_root_shared_data_local, vm_root_shared_data_mixed,
                   vm_root_local_data_mixed, vm_root_local_data_shared):
            assert vm.get_vm().state == inventory.STOPPED
            with test_stub.expected_failure("start vm in maintenance ps",
                                            Exception):
                vm.start()
        for vm in (vm_root_local, vm_root_local_data_local):
            assert vm.get_vm().state == inventory.RUNNING

    if flavor['local_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(local_ps.uuid, state='enable')
    if flavor['shared_state'] is MAINTENANCE:
        ps_ops.change_primary_storage_state(shared_ps.uuid, state='enable')

    if MAINTENANCE in (flavor['local_state'], flavor['shared_state']):
        if vr_vm and vr_vm.state == inventory.STOPPED:
            vm_ops.start_vm(vr_vm.uuid)

    for vm in vm_list:
        if vm.get_vm().state == inventory.STOPPED:
            vm.start()
        vm.check()

def test():
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')

    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None)
    if not bss:
        test_util.test_skip("no available backup storage found. Skip test")

    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        test_util.test_skip(
            "no available ceph backup storage found. Skip test")

    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid

    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
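    # These system tags make the data volume shareable and expose it through virtio-scsi.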
    volume_creation_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to maintain state')

    vm.set_state(vm_header.STOPPED)
    vm.check()
    volume.detach(vm.get_vm().uuid)

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)

    vm.start()
    vm.check()

    volume.delete()
    #volume.expunge()
    volume.check()
    vm.destroy()

    test_util.test_pass('Delete volume under PS maintain mode Test Success')
Code example #46
def env_recover():
    for maintenance_ps in maintenance_ps_list:
        ps_ops.change_primary_storage_state(maintenance_ps.uuid, state='enable')
    test_lib.lib_error_cleanup(test_obj_dict)
Code example #47
def test():
    ps_env = test_stub.PSEnvChecker()
    ps1, ps2 = ps_env.get_two_ps()

    vm_list = []
    for root_vol_ps in [ps1, ps2]:
        for data_vol_ps in [ps1, ps2]:
            if root_vol_ps.type == "SharedBlock":
                bs_type = "ImageStoreBackupStorage"
            elif root_vol_ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
                bs_type = "Ceph"
            else:
                bs_type = None
            vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1,
                                            ps_uuid=root_vol_ps.uuid, data_volume_number=VOLUME_NUMBER,
                                            ps_uuid_for_data_vol=data_vol_ps.uuid, timeout=1200000, bs_type=bs_type)[0]
            test_obj_dict.add_vm(vm)
            vm_list.append(vm)

    vm1, vm2, vm3, vm4 = vm_list
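    # Only vm1 (root and data volumes both on ps1) should stay running once ps2 enters maintenance; vm2-vm4 each have at least one volume on ps2.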

    ps_ops.change_primary_storage_state(state='maintain', primary_storage_uuid=ps2.uuid)
    maintenance_ps_list.append(ps2)
    time.sleep(60)

    vr_vm_list = test_lib.lib_find_vr_by_vm(vm1.get_vm())
    vr_vm = None
    if vr_vm_list:
        vr_vm = vr_vm_list[0]
        if vr_vm.allVolumes[0].primaryStorageUuid == ps2.uuid:
            assert vr_vm.state in (inventory.STOPPED, inventory.STOPPING)
        else:
            assert vr_vm.state == inventory.RUNNING
            vm1.check()
    else:
        vm1.check()

    for vm in vm_list:
        vm.update()

    assert vm1.get_vm().state == inventory.RUNNING
    assert vm2.get_vm().state == inventory.STOPPED
    assert vm3.get_vm().state == inventory.STOPPED
    assert vm4.get_vm().state == inventory.STOPPED

    for vm in [vm2, vm3, vm4]:
        with test_stub.expected_failure("start vm in maintenance ps", Exception):
            vm.start()

    test_util.test_dsc('enable ps2')
    ps_ops.change_primary_storage_state(state='enable', primary_storage_uuid=ps2.uuid)
    maintenance_ps_list.remove(ps2)

    if vr_vm and vr_vm.state == inventory.STOPPED:
        vm_ops.start_vm(vr_vm.uuid)

    time.sleep(10)
    for vm in [vm2, vm3, vm4]:
        vm.start()

    for vm in [vm2, vm3, vm4]:
        vm.update()
        vm.check()

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Code example #48
def test():
    ps_env = test_stub.PSEnvChecker()

    ps, another_ps = ps_env.get_two_ps()

    if ps_env.is_sb_ceph_env:
        vm1, vm2 = test_stub.create_multi_vms(name_prefix='test-', count=2, ps_uuid=ps.uuid, timeout=600000, bs_type='ImageStoreBackupStorage')
    else:
        vm1, vm2 = test_stub.create_multi_vms(name_prefix='test-', count=2, ps_uuid=ps.uuid, timeout=600000)

    for vm in (vm1, vm2):
        test_obj_dict.add_vm(vm)
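
    # When another_ps is local storage, the data volumes must be created on vm2's host, hence the conditional host_uuid below.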

    volume_in_another = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=another_ps,
                                                       host_uuid=test_lib.lib_get_vm_host(vm2.get_vm()).uuid
                                                       if another_ps.type == inventory.LOCAL_STORAGE_TYPE else None)
    for volume in volume_in_another:
        test_obj_dict.add_volume(volume)

    for volume in volume_in_another:
        volume.attach(vm2)

    test_util.test_dsc('set another ps in maintenance mode')
    ps_ops.change_primary_storage_state(state='maintain', primary_storage_uuid=another_ps.uuid)
    maintenance_ps_list.append(another_ps)

    test_stub.wait_until_vm_reach_state(60, inventory.STOPPED, vm2)
    vm1.update()
    assert vm1.get_vm().state == inventory.RUNNING

    vr_vm_list = test_lib.lib_find_vr_by_vm(vm1.get_vm())
    vr_vm = None
    if vr_vm_list:
        vr_vm = vr_vm_list[0]
        if vr_vm.allVolumes[0].primaryStorageUuid == another_ps.uuid:
            assert vr_vm.state in (inventory.STOPPED, inventory.STOPPING)
        else:
            assert vr_vm.state == inventory.RUNNING
            vm1.check()
    else:
        vm1.check()

    with test_stub.expected_failure("Start vm in maintenance ps", Exception):
        vm2.start()

    test_util.test_dsc('enable another ps')
    ps_ops.change_primary_storage_state(state='enable', primary_storage_uuid=another_ps.uuid)
    maintenance_ps_list.remove(another_ps)

    if vr_vm and vr_vm.state == inventory.STOPPED:
        vm_ops.start_vm(vr_vm.uuid)

    time.sleep(10)
    vm2.start()
    vm2.check()

    for volume in volume_in_another:
        volume.detach()
        volume.attach(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')