Example n. 1
0
def env_recover():
    """Clean up test objects, then remove any primary storages this test added."""
    # Destroy VMs/volumes tracked in the shared test object dict first.
    test_lib.lib_error_cleanup(test_obj_dict)
    # Detach and delete every primary storage created during the test.
    # Iterating an empty list is a no-op, so no explicit guard is needed.
    for added_ps in new_ps_list:
        first_cluster_uuid = added_ps.attachedClusterUuids[0]
        ps_ops.detach_primary_storage(added_ps.uuid, first_cluster_uuid)
        ps_ops.delete_primary_storage(added_ps.uuid)
def test():
    """Detach one of two primary storages and verify VM behavior.

    Scenario: deploy VMs/volumes on two primary storages, detach a randomly
    chosen PS from the cluster, then check that
      * VMs on the detached PS are stopped and cannot be started,
      * VMs on the remaining PS keep running,
      * new VMs cannot be placed on the detached PS and always land on the
        still-attached one.
    """
    ps_env = test_stub.PSEnvChecker()
    # Pick the environment wrapper matching the deployment (SharedBlock+Ceph
    # or a generic two-PS setup); both deploy VM_COUNT VMs and VOLUME_NUMBER
    # volumes per primary storage.
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember a PS created by the env so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps
    ps_ops.detach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    # Track the detachment so env_recover() can re-attach on failure.
    detached_ps_list.append(selected_ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    # Starting a VM whose PS is detached must fail.
    with test_stub.expected_failure('start vm in ps that not attached to cluster', Exception):
        random.choice(env.get_vm_list_from_ps(selected_ps)).start()

    # Explicitly placing a new VM on the detached PS must fail too.
    with test_stub.expected_failure("Create vm in detached ps", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=selected_ps.uuid)

    test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
    if ps_env.is_sb_ceph_env:
        # In the SharedBlock+Ceph env the backup storage type must match the
        # PS that is still attached.
        if selected_ps.type == "SharedBlock":
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, bs_type='Ceph')
        else:
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, bs_type='ImageStoreBackupStorage')
    else:
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    # Every new VM's root volume must land on the still-attached PS.
    for vm in vm_list:
        assert vm.get_vm().allVolumes[0].primaryStorageUuid == another_ps.uuid

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
    """Tear down the test environment and delete primary storages added by it."""
    test_util.test_dsc("Destroy test object")
    test_lib.lib_error_cleanup(test_obj_dict)
    if not new_ps_list:
        return
    for created in new_ps_list:
        cluster_uuid = created.attachedClusterUuids[0]
        ps_ops.detach_primary_storage(created.uuid, cluster_uuid)
        ps_ops.delete_primary_storage(created.uuid)
Example n. 4
0
def env_recover():
    """Restore the environment: clean objects, re-enable PSes, drop added PSes."""
    test_lib.lib_error_cleanup(test_obj_dict)
    # Put any primary storage the test disabled back into the 'enable' state.
    for ps in disabled_ps_list:
        ps_ops.change_primary_storage_state(ps.uuid, state='enable')
    # Detach and delete primary storages created by this test, if any.
    for ps in new_ps_list:
        ps_ops.detach_primary_storage(ps.uuid, ps.attachedClusterUuids[0])
        ps_ops.delete_primary_storage(ps.uuid)
def env_recover():
    """Undo test side effects: cleanup objects, re-enable PSes, delete new PSes."""
    test_lib.lib_error_cleanup(test_obj_dict)
    for storage in disabled_ps_list:
        ps_ops.change_primary_storage_state(storage.uuid, state='enable')
    if new_ps_list:
        for storage in new_ps_list:
            first_cluster = storage.attachedClusterUuids[0]
            ps_ops.detach_primary_storage(storage.uuid, first_cluster)
            ps_ops.delete_primary_storage(storage.uuid)
def env_recover():
    """Re-attach any primary storage left detached, then clean up test resources."""
    for detached in detached_ps_list:
        # Re-attach to the first cluster, matching where the test detached it.
        ps_ops.attach_primary_storage(
            detached.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    # Remove any primary storage added during the test.
    for created in new_ps_list:
        ps_ops.detach_primary_storage(created.uuid,
                                      created.attachedClusterUuids[0])
        ps_ops.delete_primary_storage(created.uuid)
def test():
    """Detach/attach a random primary storage five times, then verify its VMs
    stopped, can be recovered, and a new VM can be created on it again.
    """
    ps_env = test_stub.PSEnvChecker()
    # Pick the environment wrapper matching the deployment type.
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps

    # Detach/attach cycle x5; detached_ps_list mirrors the current state so
    # env_recover() re-attaches only if the test dies mid-cycle.
    for _ in xrange(5):
        ps_ops.detach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.start()
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.check()
        vm.update()
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    # The PS is attached again, so creation on it must succeed now.
    vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Detach/attach a random PS five times (two-PS env only), verify its VMs
    stopped, recover them, and create a new VM on that PS.
    """
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps

    # Detach/attach cycle x5; detached_ps_list mirrors the current state so
    # env_recover() re-attaches only if the test dies mid-cycle.
    for _ in xrange(5):
        ps_ops.detach_primary_storage(
            selected_ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(
            selected_ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.start()
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.check()
        vm.update()
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    # The PS is attached again, so creation on it must succeed now.
    vm = test_stub.create_multi_vms(name_prefix='test-vm',
                                    count=1,
                                    ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
    """Re-attach detached primary storages, then tear down test objects."""
    for left_detached in detached_ps_list:
        cluster = res_ops.get_resource(res_ops.CLUSTER)[0]
        ps_ops.attach_primary_storage(left_detached.uuid, cluster.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    if new_ps_list:
        # Remove every primary storage the test created.
        for created in new_ps_list:
            ps_ops.detach_primary_storage(
                created.uuid, created.attachedClusterUuids[0])
            ps_ops.delete_primary_storage(created.uuid)
Example n. 10
0
def test():
    """Detach a random PS (two-PS env) and verify: its VMs stop and cannot be
    started, creating a VM on it fails, and new VMs land on the other PS.
    """
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps
    ps_ops.detach_primary_storage(
        selected_ps.uuid,
        res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    # Track the detachment so env_recover() can re-attach on failure.
    detached_ps_list.append(selected_ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    # Starting a VM whose PS is detached must fail.
    with test_stub.expected_failure(
            'start vm in ps that not attached to cluster', Exception):
        random.choice(env.get_vm_list_from_ps(selected_ps)).start()

    # Explicitly placing a new VM on the detached PS must fail too.
    with test_stub.expected_failure("Create vm in detached ps", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm',
                                   count=1,
                                   ps_uuid=selected_ps.uuid)

    test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
    vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    # Every new VM's root volume must land on the still-attached PS.
    for vm in vm_list:
        assert vm.get_vm().allVolumes[0].primaryStorageUuid == another_ps.uuid

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Example n. 11
0
def test():
    """Repeatedly delete and re-create the second primary storage, then verify
    VMs/volumes can still be created afterwards.

    Steps (x5): detach ps2 from the first cluster, delete it, re-create an
    identical PS from its saved attributes and attach it back.  Finally create
    a VM with default parameters plus a VM/volume explicitly placed on ps2.
    """
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()

    for _ in xrange(5):
        test_util.test_dsc('Remove ps2')
        ps_ops.detach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        ps_ops.delete_primary_storage(ps2.uuid)
        # Track the deleted PS so env_recover() can restore it if the test
        # aborts before it is added back.
        delete_ps_list.append(ps2)
        test_util.test_dsc('Add ps2 back')
        # Rebuild a creation option from the attributes of the deleted PS.
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(ps2.name)
        ps_config.set_description(ps2.description)
        ps_config.set_zone_uuid(ps2.zoneUuid)
        ps_config.set_type(ps2.type)
        ps_config.set_url(ps2.url)
        if ps2.type == inventory.LOCAL_STORAGE_TYPE:
            ps2 = ps_ops.create_local_primary_storage(ps_config)
        elif ps2.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            ps2 = ps_ops.create_nfs_primary_storage(ps_config)
        else:
            # BUG FIX: this branch used to set ps2 = None, which crashed a few
            # lines below with an AttributeError on ps2.uuid.  Fail explicitly
            # with a clear message instead.
            test_util.test_fail('unsupported primary storage type: %s' % ps2.type)
        time.sleep(5)
        ps_ops.attach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        time.sleep(5)
        delete_ps_list.pop()

    test_util.test_dsc('create VM by default para')
    vm1 = test_stub.create_multi_vms(name_prefix='vm1',
                                     count=1,
                                     data_volume_number=VOLUME_NUMBER)[0]
    test_obj_dict.add_vm(vm1)

    if ps_env.is_local_nfs_env:
        test_util.test_dsc('create date volume in ps2')
        # NOTE(review): create_multi_volumes appears to return a list (compare
        # the [0] indexing used with create_multi_vms); passing the whole list
        # to add_volume may be unintended -- confirm against test_stub.
        volume = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=ps2)
        test_obj_dict.add_volume(volume)
    else:
        test_util.test_dsc('create VM in ps2')
        vm2 = test_stub.create_multi_vms(name_prefix='vm2',
                                         count=1,
                                         ps_uuid=ps2.uuid,
                                         data_volume_number=VOLUME_NUMBER)[0]
        test_obj_dict.add_vm(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Detach both primary storages from the cluster; all VMs must stop and VM
    creation must fail; re-attach both and verify everything recovers.
    """
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach all Primary Storage from cluster')

    # Track detachments so env_recover() can re-attach on failure.
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.detach_primary_storage(ps.uuid, ps.attachedClusterUuids[0])
        detached_ps_list.append(ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.STOPPED

    # With no PS attached to the cluster, VM creation must fail.
    with test_stub.expected_failure('Create vm when no ps attached to cluster',
                                    Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1)

    # Re-attach both PSes and drop them from the recovery list.
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.attach_primary_storage(
            ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.remove(ps)

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.start()

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.check()
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.RUNNING

    # Creation must work again once storage is re-attached.
    vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Create four VMs covering every (root PS, data PS) combination across two
    primary storages, detach and delete ps2, then verify which VMs survive.
    """
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()
    # Save the disk uuid so a SharedBlock ps2 can be re-created during recovery.
    if ps2.type == 'SharedBlock':
        disk_uuid.append(ps2.sharedBlocks[0].diskUuid)

    # vm1: root+data on ps1; vm2: root ps1 / data ps2;
    # vm3: root ps2 / data ps1; vm4: root+data on ps2.
    vm_list = []
    for root_vol_ps in [ps1, ps2]:
        for data_vol_ps in [ps1, ps2]:
            vm = test_stub.create_multi_vms(
                name_prefix='test_vm',
                count=1,
                ps_uuid=root_vol_ps.uuid,
                data_volume_number=VOLUME_NUMBER,
                ps_uuid_for_data_vol=data_vol_ps.uuid,
                timeout=1200000)[0]
            test_obj_dict.add_vm(vm)
            vm_list.append(vm)

    vm1, vm2, vm3, vm4 = vm_list

    ps_ops.detach_primary_storage(
        ps2.uuid,
        res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    delete_ps_list.append(ps2)
    # Give VM states time to settle after the detach (empirical delay).
    time.sleep(30)
    for vm in vm_list:
        vm.update()

    # Only the VM with no volume on ps2 keeps running.
    assert vm1.get_vm().state == inventory.RUNNING
    assert vm2.get_vm().state == inventory.STOPPED
    assert vm3.get_vm().state == inventory.STOPPED
    assert vm4.get_vm().state == inventory.STOPPED

    ps_ops.delete_primary_storage(ps2.uuid)
    time.sleep(10)
    # After deleting ps2, only the VMs whose ROOT volume was on ps1 remain.
    conf = res_ops.gen_query_conditions('type', '=', 'UserVM')
    left_vm_list = res_ops.query_resource(res_ops.VM_INSTANCE, conf)
    assert len(left_vm_list) == 2
    left_vm_uuid_list = [vm.uuid for vm in left_vm_list]
    assert vm1.get_vm().uuid in left_vm_uuid_list
    assert vm2.get_vm().uuid in left_vm_uuid_list

    # Remaining volumes: VOLUME_NUMBER data volumes for each surviving VM plus
    # one root volume per remaining VM instance.
    assert len(res_ops.query_resource(res_ops.VOLUME)) == \
           VOLUME_NUMBER * 2 + len(res_ops.query_resource(res_ops.VM_INSTANCE))

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Detach the Ceph primary storage, then storage-migrate a stopped VM that
    lives on the first primary storage to a candidate PS.
    """
    checker = test_stub.PSEnvChecker()
    all_ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    # The scenario requires a Ceph primary storage in the environment.
    if not [ps for ps in all_ps if ps.type == 'Ceph']:
        test_util.test_skip('Skip test as there is not Ceph primary storage')

    env_cls = (test_stub.SanAndCephPrimaryStorageEnv if checker.is_sb_ceph_env
               else test_stub.TwoPrimaryStorageEnv)
    env = env_cls(test_object_dict=test_obj_dict,
                  first_ps_vm_number=VM_COUNT,
                  second_ps_vm_number=VM_COUNT,
                  first_ps_volume_number=VOLUME_NUMBER,
                  second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    vms_on_first_ps = env.first_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach Ceph Primary Storage from cluster')
    selected_ps, another_ps = env.second_ps, env.first_ps
    ps_ops.detach_primary_storage(
        selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    detached_ps_list.append(selected_ps)

    # Stop a random VM on the still-attached PS and migrate its storage.
    victim = random.choice(vms_on_first_ps)
    victim.stop()
    victim_uuid = victim.get_vm().uuid
    candidates = datamigr_ops.get_ps_candidate_for_vm_migration(victim_uuid)
    target_ps_uuid = random.choice(candidates).uuid
    datamigr_ops.ps_migrage_vm(target_ps_uuid, victim_uuid)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Create four VMs covering every (root PS, data PS) combination, delete one
    primary storage, and verify which VMs survive.

    In the SharedBlock+Ceph env ps1 (SharedBlock) is deleted; otherwise ps2 is.
    VMs whose root volume lived on the deleted PS are destroyed; the others
    remain (stopped if they had a data volume on it).
    """
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()
    # Save the disk uuid so the SharedBlock PS can be re-created in recovery.
    if ps_env.is_sb_ceph_env:
        disk_uuid.append(ps1.sharedBlocks[0].diskUuid)
    else:
        if ps2.type == 'SharedBlock':
            disk_uuid.append(ps2.sharedBlocks[0].diskUuid)

    # vm1: root+data on ps1; vm2: root ps1 / data ps2;
    # vm3: root ps2 / data ps1; vm4: root+data on ps2.
    vm_list = []
    for root_vol_ps in [ps1, ps2]:
        for data_vol_ps in [ps1, ps2]:
            if ps_env.is_sb_ceph_env:
                # Backup storage type must match the root PS type in this env.
                vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1,
                                                ps_uuid=root_vol_ps.uuid, data_volume_number=VOLUME_NUMBER,
                                                ps_uuid_for_data_vol=data_vol_ps.uuid, timeout=1200000,
                                                bs_type="ImageStoreBackupStorage" if root_vol_ps.type == "SharedBlock" else "Ceph")[0]
            else:
                vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1,
                                                ps_uuid=root_vol_ps.uuid, data_volume_number=VOLUME_NUMBER,
                                                ps_uuid_for_data_vol=data_vol_ps.uuid, timeout=1200000)[0]
            test_obj_dict.add_vm(vm)
            vm_list.append(vm)

    vm1, vm2, vm3, vm4 = vm_list

    # Detach the PS under test and register it for recovery.
    if ps_env.is_sb_ceph_env:
        ps_ops.detach_primary_storage(ps1.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        delete_ps_list.append(ps1)
    else:
        ps_ops.detach_primary_storage(ps2.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        delete_ps_list.append(ps2)
    # Give VM states time to settle after the detach (empirical delay).
    time.sleep(30)
    for vm in vm_list:
        vm.update()

    # Only the VM with no volume on the detached PS keeps running.
    if ps_env.is_sb_ceph_env:
        assert vm1.get_vm().state == inventory.STOPPED
        assert vm2.get_vm().state == inventory.STOPPED
        assert vm3.get_vm().state == inventory.STOPPED
        assert vm4.get_vm().state == inventory.RUNNING
        ps_ops.delete_primary_storage(ps1.uuid)
    else:
        assert vm1.get_vm().state == inventory.RUNNING
        assert vm2.get_vm().state == inventory.STOPPED
        assert vm3.get_vm().state == inventory.STOPPED
        assert vm4.get_vm().state == inventory.STOPPED
        ps_ops.delete_primary_storage(ps2.uuid)

    time.sleep(10)
    # After the delete, only VMs whose ROOT volume was on the other PS remain.
    conf = res_ops.gen_query_conditions('type', '=', 'UserVM')
    left_vm_list = res_ops.query_resource(res_ops.VM_INSTANCE, conf)
    assert len(left_vm_list) == 2
    left_vm_uuid_list = [vm.uuid for vm in left_vm_list]
    if ps_env.is_sb_ceph_env:
        assert vm3.get_vm().uuid in left_vm_uuid_list
        assert vm4.get_vm().uuid in left_vm_uuid_list
    else:
        assert vm1.get_vm().uuid in left_vm_uuid_list
        assert vm2.get_vm().uuid in left_vm_uuid_list

    # Remaining volumes: VOLUME_NUMBER data volumes for each surviving VM plus
    # one root volume per remaining VM instance.
    assert len(res_ops.query_resource(res_ops.VOLUME)) == \
           VOLUME_NUMBER * 2 + len(res_ops.query_resource(res_ops.VM_INSTANCE))

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Detach both primary storages; all VMs must stop and (when no other PS
    exists) VM creation must fail; re-attach both and verify recovery and that
    VMs can again be created on each PS.
    """
    ps_env = test_stub.PSEnvChecker()
    # Pick the environment wrapper matching the deployment type.
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Remember an env-created PS so env_recover() can delete it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach all Primary Storage from cluster')

    # Track detachments so env_recover() can re-attach on failure.
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.detach_primary_storage(ps.uuid, ps.attachedClusterUuids[0])
        detached_ps_list.append(ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached VM inventories before asserting on state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.STOPPED

    # Only assert creation failure when no third PS remains in the zone.
    ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
    if len(ps_list) == 2:
        with test_stub.expected_failure('Create vm when no ps attached to cluster', Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=1)

    # Re-attach both PSes and drop them from the recovery list.
    for ps in [env.first_ps, env.second_ps]:
        ps_ops.attach_primary_storage(ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.remove(ps)

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.start()

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.check()
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.RUNNING

    # Creation works again on each PS.  (BUG FIX: the name prefix was
    # misspelled 'text-vm' here; normalized to 'test-vm' as used elsewhere.)
    if ps_env.is_sb_ceph_env:
        vm1 = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.first_ps.uuid, bs_type='ImageStoreBackupStorage')[0]
        vm2 = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.second_ps.uuid, bs_type='Ceph')[0]
        test_obj_dict.add_vm(vm1)
        test_obj_dict.add_vm(vm2)
    else:
        vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.second_ps.uuid)[0]
        test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Example n. 17
0
def test():
    """Repeatedly delete and re-create the second primary storage (including
    the SharedBlock case, which needs its volume group wiped over SSH first),
    then verify VMs/volumes can still be created.
    """
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()
    if ps2.type == 'SharedBlock':
        # Keep the VG/disk identifiers needed to re-create a SharedBlock PS.
        volumegroup_uuid = ps2.sharedBlocks[0].sharedBlockGroupUuid
        disk_uuid.append(ps2.sharedBlocks[0].diskUuid)

    for _ in xrange(5):
        test_util.test_dsc('Remove ps2')
        ps_ops.detach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        ps_ops.delete_primary_storage(ps2.uuid)
        # Track the deleted PS so env_recover() can restore it on failure.
        delete_ps_list.append(ps2)
        test_util.test_dsc('Add ps2 back')
        # Rebuild a creation option from the attributes of the deleted PS.
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(ps2.name)
        ps_config.set_description(ps2.description)
        ps_config.set_zone_uuid(ps2.zoneUuid)
        ps_config.set_type(ps2.type)
        ps_config.set_url(ps2.url)
        if ps2.type == inventory.LOCAL_STORAGE_TYPE:
            ps2 = ps_ops.create_local_primary_storage(ps_config)
        elif ps2.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            ps2 = ps_ops.create_nfs_primary_storage(ps_config)
        elif ps2.type == "SharedBlock":
            # The old LVM volume group must be removed on a host before the
            # disk can back a new SharedBlock PS.
            host = random.choice(res_ops.query_resource(res_ops.HOST))
            cmd = "vgchange --lock-start %s && vgremove %s -y" % (
                volumegroup_uuid, volumegroup_uuid)
            host_username = os.environ.get('hostUsername')
            host_password = os.environ.get('hostPassword')
            rsp = test_lib.lib_execute_ssh_cmd(host.managementIp,
                                               host_username, host_password,
                                               cmd, 240)
            if not rsp:
                # vgremove failed: dump the current VG list for diagnosis.
                cmd = "vgs"
                rsp = test_lib.lib_execute_ssh_cmd(host.managementIp,
                                                   host_username,
                                                   host_password, cmd, 240)
                test_util.test_logger(rsp)
                test_util.test_fail("vgremove failed")
            ps2 = ps_ops.create_sharedblock_primary_storage(
                ps_config, disk_uuid)
            # NOTE(review): the next iteration removes a VG named after the new
            # PS uuid -- presumably the SharedBlock VG is named by PS uuid;
            # confirm against ps_ops.
            volumegroup_uuid = ps2.uuid
        else:
            # Unsupported type: ps2 becomes None and the attach below would
            # raise AttributeError.
            ps2 = None
        time.sleep(5)
        ps_ops.attach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        time.sleep(5)
        delete_ps_list.pop()
    test_util.test_dsc('create VM by default para')
    vm1 = test_stub.create_multi_vms(name_prefix='vm1',
                                     count=1,
                                     data_volume_number=VOLUME_NUMBER,
                                     timeout=1200000)[0]
    test_obj_dict.add_vm(vm1)

    if ps_env.is_local_nfs_env:
        test_util.test_dsc('create date volume in ps2')
        # NOTE(review): create_multi_volumes appears to return a list; passing
        # the whole list to add_volume may be unintended -- confirm.
        volume = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=ps2)
        test_obj_dict.add_volume(volume)
    else:
        test_util.test_dsc('create VM in ps2')
        vm2 = test_stub.create_multi_vms(name_prefix='vm2',
                                         count=1,
                                         ps_uuid=ps2.uuid,
                                         data_volume_number=VOLUME_NUMBER,
                                         timeout=1200000)[0]
        test_obj_dict.add_vm(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
Example n. 18
0
def test():
    """Primary storage detach/delete operations with an allocator system tag.

    A system tag pins the instance offering's allocation to one PS.  The test
    verifies: detaching that PS stops its VM but the VM can be restarted and
    new VMs created; deleting the PS destroys its VMs and blocks creation
    until the PS is recovered.
    """
    # These are read by env_recover() elsewhere in the suite.
    global ps_inv
    global ps_uuid
    global cluster_uuid
    global tag
    # NOTE(review): curr_deploy_conf appears unused in this function --
    # confirm whether the export call's side effect is required.
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #pick up primary storage 1 and set system tag for instance offering.
    zone_name = os.environ.get('zoneName1')
    zone_uuid = res_ops.get_resource(res_ops.ZONE, name=zone_name)[0].uuid
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
    # Resolve the canonical PS name for the detected storage type, then
    # re-fetch the inventory by that name.
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('cephPrimaryStorageName1')

    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name1)[0]
    ps_uuid = ps_inv.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_ps_ops')

    # Pin allocation for this offering to the chosen primary storage.
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    cluster_uuid = vm1.get_vm().clusterUuid

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)

    # Detaching the PS stops vm1; record the expected state transition.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)

    vm1.check()

    vm1.start()

    # A second VM can still be created after the detach.
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    test_util.test_dsc("Delete Primary Storage")
    # Remove the allocator tag before deleting the tagged PS.
    tag_ops.delete_tag(tag.uuid)
    ps_ops.delete_primary_storage(ps_inv.uuid)

    # Deleting the PS destroys both VMs (their volumes lived on it).
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.DESTROYED)
    vm1.set_state(vm_header.DESTROYED)
    vm1.check()

    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.DESTROYED)
    vm2.set_state(vm_header.DESTROYED)
    vm2.check()

    # VM creation must fail while the primary storage is gone.
    try:
        vm3 = test_lib.lib_create_vm(vm_creation_option)
    except:
        test_util.test_logger(
            'Catch expected vm creation exception, since primary storage has been deleted. '
        )
    else:
        test_util.test_fail(
            'Fail: Primary Storage has been deleted. But vm is still created with it.'
        )

    test_stub.recover_ps(ps_inv)
    test_util.test_dsc("Attach Primary Storage")

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test primary storage operations Success')
def test():
    """Detach primary storage 1 from cluster 1 and verify VM behavior.

    Flow:
      1. Pin VM allocation to primary storage 1 via an InstanceOfferingVO
         allocator system tag, then create vm1 and vm2 (vm1 also gets a
         data volume).
      2. Detach the primary storage from the cluster; both VMs must stop.
      3. Re-attach the primary storage; both VMs must start again and a
         fresh VM (vm3) can be created.
      4. Remove the tag and clean up.

    Uses module globals ps_uuid / cluster_uuid / tag so env_recover() can
    undo the tag and re-attach the storage if the test aborts midway.
    """
    global ps_uuid
    global cluster_uuid
    global tag
    # Snapshot the current deployment config (kept for debugging/recovery).
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    cluster1_name = os.environ.get('clusterName1')
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    # Pick up primary storage 1 of zone 1; the environment variable holding
    # its name depends on the storage type.
    zone_name = os.environ.get('zoneName1')
    zone_uuid = res_ops.get_resource(res_ops.ZONE, name=zone_name)[0].uuid
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('cephPrimaryStorageName1')
    else:
        # Fail fast with a clear message instead of hitting a NameError on
        # ps_name1 below when the environment uses another storage type.
        test_util.test_fail(
            'Unsupported primary storage type: %s' % ps_inv.type)
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name1)[0]
    ps_uuid = ps_inv.uuid
    cluster_uuid = cluster1.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_detach_ps')
    vm_creation_option.set_cluster_uuid(cluster_uuid)

    # Pin allocation of new VMs to the selected primary storage.
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    volume1 = test_stub.create_volume()
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm1)

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)

    # Detaching the storage must stop every VM whose volumes live on it.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm2.update()
    vm2.set_state(vm_header.STOPPED)

    vm1.check()
    vm2.check()

    test_util.test_dsc("Attach Primary Storage")
    ps_ops.attach_primary_storage(ps_uuid, cluster_uuid)
    # After re-attaching, the stopped VMs must start and new VMs can be
    # created on the storage again.
    vm1.start()
    vm2.start()

    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)

    vm1.check()
    volume1.check()
    vm2.check()
    vm3.check()

    test_util.test_dsc("Delete new added tag")
    tag_ops.delete_tag(tag.uuid)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test detaching primary storage Success')
def test():
    """Delete the primary storage hosting running VMs and verify cleanup.

    Flow:
      1. Pin VM allocation to primary storage 1 via an InstanceOfferingVO
         allocator system tag, then create vm1.
      2. Detach the primary storage from vm1's cluster; vm1 must stop.
      3. Start vm1 again, create vm2, remove the tag, then delete the
         primary storage; both VMs must end up DESTROYED.
      4. A further VM creation must fail, since the storage is gone.
      5. Recover the primary storage for subsequent tests and clean up.

    Uses module globals ps_inv / ps_uuid / cluster_uuid / tag so
    env_recover() can undo the operations if the test aborts midway.
    """
    global ps_inv
    global ps_uuid
    global cluster_uuid
    global tag
    # Snapshot the current deployment config (kept for debugging/recovery).
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    # Pick up primary storage 1 of zone 1; the environment variable holding
    # its name depends on the storage type.
    zone_name = os.environ.get('zoneName1')
    zone_uuid = res_ops.get_resource(res_ops.ZONE, name = zone_name)[0].uuid
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('cephPrimaryStorageName1')
    else:
        # Fail fast with a clear message instead of hitting a NameError on
        # ps_name1 below when the environment uses another storage type.
        test_util.test_fail('Unsupported primary storage type: %s' % ps_inv.type)

    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name = ps_name1)[0]
    ps_uuid = ps_inv.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_ps_ops')

    # Pin allocation of new VMs to the selected primary storage.
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    cluster_uuid = vm1.get_vm().clusterUuid

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)

    # Detaching the storage must stop the VM running on it.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)

    vm1.check()

    vm1.start()

    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    test_util.test_dsc("Delete Primary Storage")
    tag_ops.delete_tag(tag.uuid)
    ps_ops.delete_primary_storage(ps_inv.uuid)

    # Deleting the storage destroys every VM whose root volume lives on it.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.DESTROYED)
    vm1.set_state(vm_header.DESTROYED)
    vm1.check()

    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.DESTROYED)
    vm2.set_state(vm_header.DESTROYED)
    vm2.check()

    try:
        vm3 = test_lib.lib_create_vm(vm_creation_option)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; an API error here is the expected outcome.
        test_util.test_logger('Catch expected vm creation exception, since primary storage has been deleted. ')
    else:
        test_util.test_fail('Fail: Primary Storage has been deleted. But vm is still created with it.')

    test_stub.recover_ps(ps_inv)
    test_util.test_dsc("Attach Primary Storage")

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test primary storage operations Success')
def test():
    """Detach primary storage 1 from cluster 1 and verify VM behavior.

    Flow:
      1. Pin VM allocation to primary storage 1 via an InstanceOfferingVO
         allocator system tag, then create vm1 and vm2 (vm1 also gets a
         data volume).
      2. Detach the primary storage from the cluster; both VMs must stop.
      3. Re-attach the primary storage; both VMs must start again and a
         fresh VM (vm3) can be created.
      4. Remove the tag and clean up.

    Uses module globals ps_uuid / cluster_uuid / tag so env_recover() can
    undo the tag and re-attach the storage if the test aborts midway.
    """
    global ps_uuid
    global cluster_uuid
    global tag
    # Snapshot the current deployment config (kept for debugging/recovery).
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    cluster1_name = os.environ.get('clusterName1')
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)[0]
    # Pick up primary storage 1 of zone 1; the environment variable holding
    # its name depends on the storage type.
    zone_name = os.environ.get('zoneName1')
    zone_uuid = res_ops.get_resource(res_ops.ZONE, name = zone_name)[0].uuid
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('cephPrimaryStorageName1')
    else:
        # Fail fast with a clear message instead of hitting a NameError on
        # ps_name1 below when the environment uses another storage type.
        test_util.test_fail('Unsupported primary storage type: %s' % ps_inv.type)
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name = ps_name1)[0]
    ps_uuid = ps_inv.uuid
    cluster_uuid = cluster1.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_detach_ps')
    vm_creation_option.set_cluster_uuid(cluster_uuid)

    # Pin allocation of new VMs to the selected primary storage.
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    volume1 = test_stub.create_volume()
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm1)

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)

    # Detaching the storage must stop every VM whose volumes live on it.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm2.update()
    vm2.set_state(vm_header.STOPPED)

    vm1.check()
    vm2.check()

    test_util.test_dsc("Attach Primary Storage")
    ps_ops.attach_primary_storage(ps_uuid, cluster_uuid)
    # After re-attaching, the stopped VMs must start and new VMs can be
    # created on the storage again.
    vm1.start()
    vm2.start()

    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)

    vm1.check()
    volume1.check()
    vm2.check()
    vm3.check()

    test_util.test_dsc("Delete new added tag")
    tag_ops.delete_tag(tag.uuid)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test detaching primary storage Success')