def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    for delete_ps in delete_ps_list:
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(delete_ps.name)
        ps_config.set_description(delete_ps.description)
        ps_config.set_zone_uuid(delete_ps.zoneUuid)
        ps_config.set_type(delete_ps.type)
        ps_config.set_url(delete_ps.url)
        if delete_ps.type == inventory.LOCAL_STORAGE_TYPE:
            new_ps = ps_ops.create_local_primary_storage(ps_config)
        elif delete_ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            new_ps = ps_ops.create_nfs_primary_storage(ps_config)
        elif delete_ps.type == "SharedBlock":
            host = random.choice(res_ops.query_resource(res_ops.HOST))
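            # start the lockspace and drop the stale volume group left by the deleted
            # SharedBlock PS so it can be recreated from the same disk below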
            cmd = "vgchange --lock-start %s && vgremove %s -y" % (delete_ps.uuid, delete_ps.uuid)
            host_username = os.environ.get('hostUsername')
            host_password = os.environ.get('hostPassword')
            rsp = test_lib.lib_execute_ssh_cmd(host.managementIp, host_username, host_password, cmd, 240)
            if not rsp:
                test_util.test_logger("vgremove failed")
            new_ps = ps_ops.create_sharedblock_primary_storage(ps_config, disk_uuid)
        else:
            new_ps = None

        ps_ops.attach_primary_storage(new_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
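# Note: the snippets in this section assume a shared module-level preamble (imports and
# globals) from the original test files. The following is a minimal sketch of that assumed
# preamble; module paths and initial values are inferred from usage, not copied from the
# source, and later snippets additionally rely on test_stub, tag_ops, exp_ops, vm_header,
# cls_ops, net_ops, host_ops and vm_ops from the same packages.
import os
import random
import time

import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import zstackwoodpecker.operations.resource_operations as res_ops

test_obj_dict = test_state.TestStateDict()  # VMs/volumes created by a test, cleaned up on error
delete_ps_list = []     # primary storages deleted by a test; env_recover() recreates them
detached_ps_list = []   # primary storages detached by a test; env_recover() re-attaches them
new_ps_list = []        # primary storages added for the test env; env_recover() removes them
disk_uuid = []          # backing disk uuids used when recreating a SharedBlock PS
VM_COUNT = 1            # assumed value
VOLUME_NUMBER = 1       # assumed value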
def env_recover():
    for ps in detached_ps_list:
        ps_ops.attach_primary_storage(ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    if new_ps_list:
        for new_ps in new_ps_list:
            ps_ops.detach_primary_storage(new_ps.uuid, new_ps.attachedClusterUuids[0])
            ps_ops.delete_primary_storage(new_ps.uuid)
def test():
    ps_env = test_stub.PSEnvChecker()
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach one random Primary Storage from the cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps

    for _ in xrange(5):
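        # detach the selected PS and immediately re-attach it; track it in
        # detached_ps_list so env_recover() can re-attach it if an iteration fails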
        ps_ops.detach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All VMs on the selected PS should be STOPPED')
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.start()
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.check()
        vm.update()
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach one random Primary Storage from the cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps

    for _ in xrange(5):
        ps_ops.detach_primary_storage(
            selected_ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(
            selected_ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All VMs on the selected PS should be STOPPED')
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED

    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.start()
    for vm in env.get_vm_list_from_ps(selected_ps):
        vm.check()
        vm.update()
        assert vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    vm = test_stub.create_multi_vms(name_prefix='test-vm',
                                    count=1,
                                    ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()

    for _ in xrange(5):
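        # delete ps2 and add it back with identical settings; it is tracked in
        # delete_ps_list so env_recover() can rebuild it if the re-add fails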
        test_util.test_dsc('Remove ps2')
        ps_ops.detach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        ps_ops.delete_primary_storage(ps2.uuid)
        delete_ps_list.append(ps2)
        test_util.test_dsc('Add ps2 back')
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(ps2.name)
        ps_config.set_description(ps2.description)
        ps_config.set_zone_uuid(ps2.zoneUuid)
        ps_config.set_type(ps2.type)
        ps_config.set_url(ps2.url)
        if ps2.type == inventory.LOCAL_STORAGE_TYPE:
            ps2 = ps_ops.create_local_primary_storage(ps_config)
        elif ps2.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            ps2 = ps_ops.create_nfs_primary_storage(ps_config)
        else:
            ps2 = None
        time.sleep(5)
        ps_ops.attach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        time.sleep(5)
        delete_ps_list.pop()

    test_util.test_dsc('create VM with default parameters')
    vm1 = test_stub.create_multi_vms(name_prefix='vm1',
                                     count=1,
                                     data_volume_number=VOLUME_NUMBER)[0]
    test_obj_dict.add_vm(vm1)

    if ps_env.is_local_nfs_env:
        test_util.test_dsc('create data volume in ps2')
        volume = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=ps2)
        test_obj_dict.add_volume(volume)
    else:
        test_util.test_dsc('create VM in ps2')
        vm2 = test_stub.create_multi_vms(name_prefix='vm2',
                                         count=1,
                                         ps_uuid=ps2.uuid,
                                         data_volume_number=VOLUME_NUMBER)[0]
        test_obj_dict.add_vm(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def error_cleanup():
    global tag
    global ps_uuid
    global cluster_uuid
    try:
        ps_ops.attach_primary_storage(ps_uuid, cluster_uuid)
    except:
        pass

    test_lib.lib_error_cleanup(test_obj_dict)
    try:
        tag_ops.delete_tag(tag.uuid)
    except:
        pass
def test():
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach all Primary Storage from cluster')

    for ps in [env.first_ps, env.second_ps]:
        ps_ops.detach_primary_storage(ps.uuid, ps.attachedClusterUuids[0])
        detached_ps_list.append(ps)

    test_util.test_dsc('All VMs should be STOPPED')
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.STOPPED

    with test_stub.expected_failure('Create vm when no ps attached to cluster',
                                    Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1)

    for ps in [env.first_ps, env.second_ps]:
        ps_ops.attach_primary_storage(
            ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.remove(ps)

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.start()

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.check()
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.RUNNING

    vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1)[0]
    test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
    test_lib.lib_error_cleanup(test_obj_dict)
    for delete_ps in delete_ps_list:
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(delete_ps.name)
        ps_config.set_description(delete_ps.description)
        ps_config.set_zone_uuid(delete_ps.zoneUuid)
        ps_config.set_type(delete_ps.type)
        ps_config.set_url(delete_ps.url)
        if delete_ps.type == inventory.LOCAL_STORAGE_TYPE:
            new_ps = ps_ops.create_local_primary_storage(ps_config)
        elif delete_ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            new_ps = ps_ops.create_nfs_primary_storage(ps_config)
        else:
            new_ps = None

        ps_ops.attach_primary_storage(
            new_ps.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
def recover_ps():
    global ps_inv
    ps_config = test_util.PrimaryStorageOption()

    ps_config.set_name(ps_inv.name)
    ps_config.set_description(ps_inv.description)
    ps_config.set_zone_uuid(ps_inv.zoneUuid)
    ps_config.set_type(ps_inv.type)
    ps_config.set_url(ps_inv.url)

    # avoid recreating the primary storage if it already exists
    cond = res_ops.gen_query_conditions('zoneUuid', '=', ps_inv.zoneUuid)
    cond = res_ops.gen_query_conditions('url', '=', ps_inv.url, cond)
    curr_ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if curr_ps:
        ps = curr_ps[0]
    else:
        ps = ps_ops.create_nfs_primary_storage(ps_config)

    for cluster_uuid in ps_inv.attachedClusterUuids:
        ps_ops.attach_primary_storage(ps.uuid, cluster_uuid)
def recover_ps(ps_inv):
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(ps_inv.name)
        ps_config.set_description(ps_inv.description)
        ps_config.set_zone_uuid(ps_inv.zoneUuid)
        ps_config.set_type(ps_inv.type)
        ps_config.set_url(ps_inv.url)

        # avoid recreating the primary storage if it already exists
        cond = res_ops.gen_query_conditions('zoneUuid', '=', ps_inv.zoneUuid)
        cond = res_ops.gen_query_conditions('url', '=', ps_inv.url, cond)

    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_config = test_util.CephPrimaryStorageOption()
        ps_config.set_name(ps_inv.name)
        ps_config.set_description(ps_inv.description)
        ps_config.set_zone_uuid(ps_inv.zoneUuid)
        ps_config.set_type(ps_inv.type)
        for key in os.environ.keys():
            if ps_inv.mons[0].hostname in os.environ.get(key):
                ps_config.set_monUrls(os.environ.get(key).split(';'))

        #ps_config.set_dataVolumePoolName(ps_inv.dataVolumePoolName)
        #ps_config.set_rootVolumePoolName(ps_inv.rootVolumePoolName)
        #ps_config.set_imageCachePoolName(ps_inv.imageCachePoolName)

        # avoid recreating the primary storage if it already exists
        cond = res_ops.gen_query_conditions('zoneUuid', '=', ps_inv.zoneUuid)
        cond = res_ops.gen_query_conditions('name', '=', ps_inv.name, cond)
    curr_ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if curr_ps:
        ps = curr_ps[0]
    else:
        ps = ps_ops.create_primary_storage(ps_config)

    for cluster_uuid in ps_inv.attachedClusterUuids:
        ps_ops.attach_primary_storage(ps.uuid, cluster_uuid)
def test():
    global ps_uuid
    global cluster_uuid
    global tag
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    cluster1_name = os.environ.get('clusterName1')
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)[0]
    #pick up primary storage 1 and set system tag for instance offering.
    zone_name = os.environ.get('zoneName1')
    zone_uuid = res_ops.get_resource(res_ops.ZONE, name = zone_name)[0].uuid
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
    if ps_inv.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    elif ps_inv.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        ps_name1 = os.environ.get('cephPrimaryStorageName1')
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name = ps_name1)[0]
    ps_uuid = ps_inv.uuid
    cluster_uuid = cluster1.uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_detach_ps')
    vm_creation_option.set_cluster_uuid(cluster_uuid)

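    # pin the instance offering to ps1 via system tag so newly created VMs are allocated on that PS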
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    volume1 = test_stub.create_volume()
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm1)

    test_util.test_dsc("Detach Primary Storage")
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)

    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm2.update()
    vm2.set_state(vm_header.STOPPED)

    vm1.check()
    vm2.check()

    test_util.test_dsc("Attach Primary Storage")
    ps_ops.attach_primary_storage(ps_uuid, cluster_uuid)
    vm1.start()
    vm2.start()

    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)

    vm1.check()
    volume1.check()
    vm2.check()
    vm3.check()

    test_util.test_dsc("Delete new added tag")
    tag_ops.delete_tag(tag.uuid)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test detaching primary storage Success')
def test():
    ps_env = test_stub.PSEnvChecker()
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach all Primary Storage from cluster')

    for ps in [env.first_ps, env.second_ps]:
        ps_ops.detach_primary_storage(ps.uuid, ps.attachedClusterUuids[0])
        detached_ps_list.append(ps)

    test_util.test_dsc('All VMs should be STOPPED')
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.STOPPED

    ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
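    # creating a VM should only fail when these are the zone's only two primary storages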
    if len(ps_list) == 2:
        with test_stub.expected_failure('Create vm when no ps attached to cluster', Exception):
            test_stub.create_multi_vms(name_prefix='test-vm', count=1)

    for ps in [env.first_ps, env.second_ps]:
        ps_ops.attach_primary_storage(ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.remove(ps)

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.start()

    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.check()
        vm.update()

    for vm in first_ps_vm_list + second_ps_vm_list:
        assert vm.get_vm().state == inventory.RUNNING

    if ps_env.is_sb_ceph_env:
        vm1 = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.first_ps.uuid, bs_type='ImageStoreBackupStorage')[0]
        vm2 = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.second_ps.uuid, bs_type='Ceph')[0]
        test_obj_dict.add_vm(vm1)
        test_obj_dict.add_vm(vm2)
    else:
        vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=env.second_ps.uuid)[0]
        test_obj_dict.add_vm(vm)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    host = dict()
    host['Broadwell'] = []
    host['Haswell-noTSX'] = []
    _hosts = res_ops.query_resource(res_ops.HOST)
    if len(_hosts) < 4:
        test_util.test_fail("this case need at least 4 hosts")

    for i in _hosts[:2]:
        test_stub.set_host_cpu_model(i.managementIp, model='Broadwell')
        host['Broadwell'].append(i)

    for i in _hosts[2:]:
        test_stub.set_host_cpu_model(i.managementIp, model='Haswell-noTSX')
        host['Haswell-noTSX'].append(i)

    hosts = host['Broadwell'] + host['Haswell-noTSX']
    clusters = res_ops.query_resource(res_ops.CLUSTER)
    for i in clusters:
        cls_ops.delete_cluster(i.uuid)
    clusters = []
    zone = res_ops.query_resource(res_ops.ZONE)[0]
    cluster_option = test_util.ClusterOption()
    cluster_option.set_hypervisor_type('KVM')
    cluster_option.set_zone_uuid(zone.uuid)

    cluster_option.set_name('Broadwell_1')
    cluster1 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO',
                              cluster1.uuid,
                              tag="clusterKVMCpuModel::Broadwell")
    clusters.append(cluster1)

    cluster_option.set_name('Broadwell_2')
    cluster2 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO',
                              cluster2.uuid,
                              tag="clusterKVMCpuModel::Broadwell")
    clusters.append(cluster2)

    cluster_option.set_name('Haswell-noTSX_1')
    cluster3 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO',
                              cluster3.uuid,
                              tag="clusterKVMCpuModel::Haswell-noTSX")
    clusters.append(cluster3)

    conditions = res_ops.gen_query_conditions('name', '=', 'vlan-test9')
    l2 = res_ops.query_resource(res_ops.L2_VLAN_NETWORK, conditions)[0]

    conditions = res_ops.gen_query_conditions('name', '=', 'l2-public')
    l2_public = res_ops.query_resource(res_ops.L2_NETWORK, conditions)[0]

    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)

    _hosts = []
    for i in range(len(clusters)):
        net_ops.attach_l2(l2.uuid, clusters[i].uuid)
        net_ops.attach_l2(l2_public.uuid, clusters[i].uuid)
        for j in ps:
            ps_ops.attach_primary_storage(j.uuid, clusters[i].uuid)
        host_option = test_util.HostOption()
        host_option.set_cluster_uuid(clusters[i].uuid)
        host_option.set_username('root')
        host_option.set_password('password')
        host_option.set_name(hosts[i].managementIp)
        host_option.set_management_ip(hosts[i].managementIp)
        _hosts.append(host_ops.add_kvm_host(host_option))

    # adding the Haswell-noTSX host to a Broadwell cluster is expected to fail (the
    # exception is only logged); adding it to the Haswell-noTSX cluster must succeed
    host_option = test_util.HostOption()
    host_option.set_cluster_uuid(clusters[0].uuid)
    host_option.set_username('root')
    host_option.set_password('password')
    host_option.set_name(hosts[3].managementIp)
    host_option.set_management_ip(hosts[3].managementIp)
    try:
        _hosts.append(host_ops.add_kvm_host(host_option))
    except Exception as e:
        test_util.test_logger(e)

    host_option.set_cluster_uuid(clusters[2].uuid)
    try:
        _hosts.append(host_ops.add_kvm_host(host_option))
    except Exception as e:
        test_util.test_fail("test cluster cpu model faild")

    # migrate vm: Broadwell -> Broadwell (same CPU model) must succeed;
    # Haswell-noTSX -> Broadwell is expected to fail and is only logged
    conditions = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    img = res_ops.query_resource(res_ops.IMAGE, conditions)[0]
    ins = res_ops.query_resource(res_ops.INSTANCE_OFFERING)[0]
    conditions = res_ops.gen_query_conditions('name', '=', 'l3VlanNetwork9')
    l3 = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0]

    vms = []
    for i in [0, 2]:
        vm_option = test_util.VmOption()
        vm_option.set_name("vm")
        vm_option.set_image_uuid(img.uuid)
        vm_option.set_cluster_uuid(clusters[i].uuid)
        vm_option.set_host_uuid(_hosts[i].uuid)
        vm_option.set_instance_offering_uuid(ins.uuid)
        vm_option.set_l3_uuids([l3.uuid])
        vm_option.set_default_l3_uuid(l3.uuid)
        vms.append(vm_ops.create_vm(vm_option))

    time.sleep(20)
    try:
        vm_ops.migrate_vm(vms[0].uuid, _hosts[1].uuid)
    except Exception as e:
        test_util.test_fail(e)

    try:
        vm_ops.migrate_vm(vms[1].uuid, _hosts[1].uuid)
    except Exception as e:
        test_util.test_logger(e)

    test_util.test_pass("test cluster cpu model pass")
def test():
    ps_env = test_stub.PSEnvChecker()

    ps1, ps2 = ps_env.get_two_ps()
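    # for a SharedBlock PS, remember its volume group and backing disk so ps2 can be recreated later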
    if ps2.type == 'SharedBlock':
        volumegroup_uuid = ps2.sharedBlocks[0].sharedBlockGroupUuid
        disk_uuid.append(ps2.sharedBlocks[0].diskUuid)

    for _ in xrange(5):
        test_util.test_dsc('Remove ps2')
        ps_ops.detach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        ps_ops.delete_primary_storage(ps2.uuid)
        delete_ps_list.append(ps2)
        test_util.test_dsc('Add ps2 back')
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(ps2.name)
        ps_config.set_description(ps2.description)
        ps_config.set_zone_uuid(ps2.zoneUuid)
        ps_config.set_type(ps2.type)
        ps_config.set_url(ps2.url)
        if ps2.type == inventory.LOCAL_STORAGE_TYPE:
            ps2 = ps_ops.create_local_primary_storage(ps_config)
        elif ps2.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            ps2 = ps_ops.create_nfs_primary_storage(ps_config)
        elif ps2.type == "SharedBlock":
            host = random.choice(res_ops.query_resource(res_ops.HOST))
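            # start the lockspace and drop the stale volume group before recreating
            # the SharedBlock PS from the same disk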
            cmd = "vgchange --lock-start %s && vgremove %s -y" % (
                volumegroup_uuid, volumegroup_uuid)
            host_username = os.environ.get('hostUsername')
            host_password = os.environ.get('hostPassword')
            rsp = test_lib.lib_execute_ssh_cmd(host.managementIp,
                                               host_username, host_password,
                                               cmd, 240)
            if not rsp:
                cmd = "vgs"
                rsp = test_lib.lib_execute_ssh_cmd(host.managementIp,
                                                   host_username,
                                                   host_password, cmd, 240)
                test_util.test_logger(rsp)
                test_util.test_fail("vgremove failed")
            ps2 = ps_ops.create_sharedblock_primary_storage(
                ps_config, disk_uuid)
            volumegroup_uuid = ps2.uuid
        else:
            ps2 = None
        time.sleep(5)
        ps_ops.attach_primary_storage(
            ps2.uuid,
            res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        time.sleep(5)
        delete_ps_list.pop()
    test_util.test_dsc('create VM with default parameters')
    vm1 = test_stub.create_multi_vms(name_prefix='vm1',
                                     count=1,
                                     data_volume_number=VOLUME_NUMBER,
                                     timeout=1200000)[0]
    test_obj_dict.add_vm(vm1)

    if ps_env.is_local_nfs_env:
        test_util.test_dsc('create data volume in ps2')
        volume = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=ps2)
        test_obj_dict.add_volume(volume)
    else:
        test_util.test_dsc('create VM in ps2')
        vm2 = test_stub.create_multi_vms(name_prefix='vm2',
                                         count=1,
                                         ps_uuid=ps2.uuid,
                                         data_volume_number=VOLUME_NUMBER,
                                         timeout=1200000)[0]
        test_obj_dict.add_vm(vm2)

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    host = dict()
    host['Broadwell'] = []
    host['Haswell-noTSX'] = []
    _hosts = res_ops.query_resource(res_ops.HOST)
    if len(_hosts) < 4:
        test_util.test_fail("this case need at least 4 hosts")

    for i in _hosts[:2]:
        test_stub.set_host_cpu_model(i.managementIp, model='Broadwell')
        host['Broadwell'].append(i)

    for i in _hosts[2:]:
        test_stub.set_host_cpu_model(i.managementIp, model='Haswell-noTSX')
        host['Haswell-noTSX'].append(i)

    hosts = host['Broadwell'] + host['Haswell-noTSX']
    clusters = res_ops.query_resource(res_ops.CLUSTER)
    for i in clusters:
        cls_ops.delete_cluster(i.uuid)
    clusters = []
    zone = res_ops.query_resource(res_ops.ZONE)[0]
    cluster_option = test_util.ClusterOption()
    cluster_option.set_hypervisor_type('KVM')
    cluster_option.set_zone_uuid(zone.uuid)

    cluster_option.set_name('Broadwell_1')
    cluster1 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO', cluster1.uuid, tag="clusterKVMCpuModel::Broadwell")
    clusters.append(cluster1)

    cluster_option.set_name('Broadwell_2')
    cluster2 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO', cluster2.uuid, tag="clusterKVMCpuModel::Broadwell")
    clusters.append(cluster2)

    cluster_option.set_name('Haswell-noTSX_1')
    cluster3 = cls_ops.create_cluster(cluster_option)
    tag_ops.create_system_tag('ClusterVO', cluster3.uuid, tag="clusterKVMCpuModel::Haswell-noTSX")
    clusters.append(cluster3)

    conditions = res_ops.gen_query_conditions('name', '=', 'vlan-test9')
    l2 = res_ops.query_resource(res_ops.L2_VLAN_NETWORK, conditions)[0]

    conditions = res_ops.gen_query_conditions('name', '=', 'l2-public')
    l2_public = res_ops.query_resource(res_ops.L2_NETWORK, conditions)[0]

    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)


    _hosts = []
    for i in range(len(clusters)):
        net_ops.attach_l2(l2.uuid, clusters[i].uuid)
        net_ops.attach_l2(l2_public.uuid, clusters[i].uuid)
        for j in ps:
            ps_ops.attach_primary_storage(j.uuid, clusters[i].uuid)
        host_option = test_util.HostOption()
        host_option.set_cluster_uuid(clusters[i].uuid)
        host_option.set_username('root')
        host_option.set_password('password')
        host_option.set_name(hosts[i].managementIp)
        host_option.set_management_ip(hosts[i].managementIp)
        _hosts.append(host_ops.add_kvm_host(host_option))

    # adding the Haswell-noTSX host to a Broadwell cluster is expected to fail (only
    # logged); after removing that host record, adding it to the Haswell-noTSX cluster
    # must succeed
    host_option = test_util.HostOption()
    host_option.set_cluster_uuid(clusters[0].uuid)
    host_option.set_username('root')
    host_option.set_password('password')
    host_option.set_name(hosts[3].managementIp)
    host_option.set_management_ip(hosts[3].managementIp)
    try:
        _hosts.append(host_ops.add_kvm_host(host_option))
    except Exception as e:
        test_util.test_logger(e)

    cond = res_ops.gen_query_conditions('name', '=', hosts[3].managementIp)
    host_uuid = res_ops.query_resource(res_ops.HOST, cond)[0].uuid
    host_ops.delete_host(host_uuid)
    host_option.set_cluster_uuid(clusters[2].uuid)
    try:
        _hosts.append(host_ops.add_kvm_host(host_option))
    except Exception as e:
        test_util.test_fail("test cluster cpu model faild")

    # migrate vm: Broadwell -> Broadwell (same CPU model) must succeed;
    # Haswell-noTSX -> Broadwell is expected to fail and is only logged
    conditions = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    img = res_ops.query_resource(res_ops.IMAGE, conditions)[0]
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    ins = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0]
    conditions = res_ops.gen_query_conditions('name', '=', 'public network')
    l3 = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0]


    vms = []
    for i in [0,2]:
        vm_option = test_util.VmOption()
        vm_option.set_name("vm")
        vm_option.set_image_uuid(img.uuid)
        vm_option.set_cluster_uuid(clusters[i].uuid)
        vm_option.set_host_uuid(_hosts[i].uuid)
        vm_option.set_instance_offering_uuid(ins.uuid)
        vm_option.set_l3_uuids([l3.uuid])
        vm_option.set_default_l3_uuid(l3.uuid)
        vms.append(vm_ops.create_vm(vm_option))

    time.sleep(20)
    try:
        vm_ops.migrate_vm(vms[0].uuid, _hosts[1].uuid)
    except Exception as e:
        test_util.test_fail(e)

    try:
        vm_ops.migrate_vm(vms[1].uuid, _hosts[1].uuid)
    except Exception as e:
        test_util.test_logger(e)

    test_util.test_pass("test cluster cpu model pass")