def _cleanup_previous_checking_point(self):
        if not self.get_utility_vm():
            return

        if not self.get_target_volume():
            return

        test_util.test_logger('cleanup checking point files for target volume: %s' % self.get_target_volume())
        volume_obj = self.get_target_volume()
        volume = volume_obj.get_volume()
        if volume.type == 'Root':
            test_util.test_logger('Can not clean up checking point files for Root Volume: %s, since it can not be detached and reattached to the utility vm for checking.' % volume.uuid)
            return

        volume_vm = volume_obj.get_target_vm()
        #check if volume has been attached to the living VM.
        if volume_obj.get_state() == volume_header.ATTACHED:
            if volume_vm.get_state() == vm_header.STOPPED or \
                    volume_vm.get_state() == vm_header.RUNNING:
                #test_util.test_logger('volume has been attached to living VM.')
                volume_obj.detach()
                volume_obj.attach(self.utility_vm)
                self._remove_checking_file()
                volume_obj.detach()
                volume_obj.attach(volume_vm)
                return 

        volume_obj.attach(self.utility_vm)
        self._remove_checking_file()
        volume_obj.detach()
def test():
    global test_obj_dict, bs, ps
    #judge whether BS is imagestore
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')

    #judge whether PS is SharedBlock or AliyunNAS
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS primary storage')

    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    #vm.check()
    test_obj_dict.add_vm(vm)

    new_vm = vm.clone(['test_vm_clone_with_on_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)

    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 1:
        test_util.test_fail('Did not find exactly 1 volume for [vm:] %s. We assigned 1 data volume when creating the VM, but caught %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Found 1 volume for [vm:] %s.' % new_vm.vm.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
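# Aside (a sketch, not part of the test above): the for/else idiom used in the
# backup-storage check runs the `else` branch only when the loop completes
# without `break`, which is how the test decides to skip on non-imagestore
# setups. A minimal self-contained illustration:
def _contains_type(resource_types, wanted_type):
    for res_type in resource_types:
        if res_type == wanted_type:
            break                      # match found: the for/else `else` is skipped
    else:
        return False                   # loop exhausted without a match
    return True

assert _contains_type(['Ceph', 'ImageStoreBackupStorage'], 'ImageStoreBackupStorage')
assert not _contains_type(['Ceph'], 'ImageStoreBackupStorage')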
def delete_scheduler(uuid, session_uuid = None):
    action = api_actions.DeleteSchedulerAction()
    action.uuid = uuid
    test_util.action_logger('Delete [Scheduler:] %s' % uuid)
    evt = account_operations.execute_action_with_session(action, session_uuid) 
    test_util.test_logger('[Scheduler:] %s is deleted.' % uuid)
    return evt
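# Hedged usage sketch for delete_scheduler(); the scheduler would normally be
# created and queried earlier in the test (the query below is an assumption):
#
#   scheduler_uuid = res_ops.query_resource(res_ops.SCHEDULER)[0].uuid
#   delete_scheduler(scheduler_uuid)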
    def add_checking_point(self):
        volume_obj = self.get_target_volume()
        volume = volume_obj.get_volume()
        if volume.type == 'Root':
            test_util.test_logger('Can not add checking point file for Root Volume: %s, since it can not be detached and reattached to utility vm for checking.' % volume.uuid)
            return

        volume_vm = volume_obj.get_target_vm()
        #check if volume has been attached to the living VM.
        if volume_obj.get_state() == volume_header.ATTACHED:
            if volume_vm.get_state() == vm_header.STOPPED or \
                    volume_vm.get_state() == vm_header.RUNNING:
                test_util.test_logger('volume has been attached to living VM.')

                volume_obj.detach()
                volume_obj.attach(self.utility_vm)
                #add checking point
                self._create_checking_file()
                volume_obj.detach()
                volume_obj.attach(volume_vm)
                return 
        volume_obj.attach(self.utility_vm)
        #add_checking_point
        self._create_checking_file()
        volume_obj.detach()
def test():

    global bs_username, bs_hostname, bs_password, bs_name, bs_url, bs_sshport
    global new_image    

    file_path = test_stub.gen_license('woodpecker', '*****@*****.**', '1', 'Prepaid', '1', '')
    test_stub.load_license(file_path)
    issued_date = test_stub.get_license_info().issuedDate
    expired_date = test_stub.license_date_cal(issued_date, 86400 * 1)
    test_stub.check_license("*****@*****.**", 1, None, False, 'Paid', issued_date=issued_date, expired_date=expired_date)

    test_util.test_logger('create zone and add the bs of the imagestore')
    node_uuid = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].uuid
    test_stub.create_zone()
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid

    bs_name = 'BS1'
    bs_hostname = os.environ.get('node1Ip')
    bs_username = os.environ.get('nodeUserName')
    bs_password = os.environ.get('nodePassword')
    bs_url = '/zstack_bs'
    bs_sshport = '22'
    test_stub.create_image_store_backup_storage(bs_name, bs_hostname, bs_username, bs_password, bs_url, bs_sshport)
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid

    test_stub.reload_default_license()
    test_util.test_logger('Check default community license')
    #test_stub.check_license(None, None, 2147483647, False, 'Community')

    try:
        bs_ops.reconnect_backup_storage(bs_uuid)
    except Exception as e:
        if "commercial" in str(e):
            test_util.test_pass('Reconnecting the bs failed as expected: a commercial license is required to use ImageStore.')
    def _add_zone(zone, zone_duplication):
        action = api_actions.CreateZoneAction()
        action.sessionUuid = session_uuid
        if zone_duplication == 0:
            action.name = zone.name_
            action.description = zone.description__
        else:
            action.name = generate_dup_name(zone.name_, zone_duplication, 'z')
            action.description = generate_dup_name(zone.description__, zone_duplication, 'zone')

        try:
            evt = action.run()
            test_util.test_logger(jsonobject.dumps(evt))
            zinv = evt.inventory
        except:
            exc_info.append(sys.exc_info())
     
        if xmlobject.has_element(zone, 'backupStorageRef'):
            for ref in xmlobject.safe_list(zone.backupStorageRef):
                bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_)
                bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone')

                action = api_actions.AttachBackupStorageToZoneAction()
                action.sessionUuid = session_uuid
                action.backupStorageUuid = bs.uuid
                action.zoneUuid = zinv.uuid
                try:
                    evt = action.run()
                    test_util.test_logger(jsonobject.dumps(evt))
                except:
                    exc_info.append(sys.exc_info())
def vm_op_test(vm, op):
    test_util.test_logger(vm.vm.name + "-------" + op)
    ops = {
        "VM_TEST_STOP": stop,
        "VM_TEST_REBOOT": reboot,
        "VM_TEST_NONE": do_nothing,
        "VM_TEST_MIGRATE": migrate,
        "VM_TEST_SNAPSHOT": create_snapshot,
        "VM_TEST_CREATE_IMG": create_image,
        "VM_TEST_RESIZE_RVOL": resize_rvol,
        "RVOL_DEL_SNAPSHOT": delete_snapshot,
        "VM_TEST_CHANGE_OS": change_os,
        "VM_TEST_RESET": reset,
        "VM_TEST_BACKUP": back_up,
        "VM_TEST_REVERT_BACKUP": revert_backup,
        "VM_TEST_REVERT_VM_BACKUP": revert_vm_backup,
        "VM_TEST_BACKUP_IMAGE": backup_image, 
        "DVOL_TEST_SNAPSHOT": create_dvol_snapshot,
        "DVOL_DEL_SNAPSHOT": delete_dvol_snapshot,
        "DVOL_TEST_CREATE_IMG": create_dvol_image,
        "DVOL_TEST_RESIZE": resize_dvol,
        "DVOL_BACKUP": dvol_back_up,
        "DVOL_TEST_BACKUP_IMAGE": dvol_backup_image,
        "CREATE_ATTACH_VOLUME": create_attach_volume

    }
    ops[op](vm)
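# The dict above is a plain dispatch table: ops[op] raises KeyError for an
# unknown op name. A guarded variant (a sketch, not the framework's API):
def vm_op_test_checked(vm, op, ops):
    handler = ops.get(op)
    if handler is None:
        raise ValueError('unknown vm op: %s' % op)
    return handler(vm)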
def add_zone_resource(deploy_config, zone_name):
    session_uuid = acc_ops.login_as_admin()
    try:
        test_util.test_dsc('-------add zone operation-------')
        dep_ops.add_zone(deploy_config, session_uuid, zone_name = zone_name)
        test_util.test_dsc('-------add l2 operation-------')
        dep_ops.add_l2_network(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add primary storage operation-------')
        dep_ops.add_primary_storage(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add cluster operation-------')
        dep_ops.add_cluster(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add host operation-------')
        dep_ops.add_host(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add l3 operation-------')
        dep_ops.add_l3_network(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add virtual router offering operation-------')
        dep_ops.add_virtual_router(deploy_config, session_uuid, \
                zone_name = zone_name)
        zone = res_ops.get_resource(res_ops.ZONE, session_uuid, \
                name = zone_name)[0]
    except Exception as e:
        test_util.test_logger('[Error] zstack deployment meets exception when adding zone resource.')
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        acc_ops.logout(session_uuid)

    test_util.action_logger('Complete add zone resources for [uuid:] %s' \
            % zone.uuid)
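# Hedged usage sketch: deploy_config normally comes from the woodpecker
# harness (test_lib.deploy_config); the zone name here is an assumption.
#
#   add_zone_resource(test_lib.deploy_config, 'zone-1')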
def deploy_initial_database(deploy_config):
    operations = [
            add_backup_storage,
            add_zone,
            add_l2_network,
            add_primary_storage,
            add_cluster,
            add_host,
            add_l3_network,
            add_image,
            add_disk_offering,
            add_instance_offering,
            add_virtual_router
            ]
    for operation in operations:
        session_uuid = account_operations.login_as_admin()
        try:
            operation(deploy_config, session_uuid)
        except Exception as e:
            test_util.test_logger('[Error] zstack deployment meets exception when doing %s. The real exception is:' % operation.__name__)
            print('----------------------Exception Reason------------------------')
            traceback.print_exc(file=sys.stdout)
            print('-------------------------Reason End---------------------------\n')
            raise e
        finally:
            account_operations.logout(session_uuid)

    test_util.test_logger('[Done] zstack initial database was created successfully.')
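# Sketch of the session-per-operation pattern used above, with generic
# login/logout callables (assumptions, not the module's API): each step gets
# a fresh admin session, and the finally block guarantees logout even when a
# step raises.
def run_with_fresh_sessions(operations, login, logout, config):
    for operation in operations:
        session_uuid = login()
        try:
            operation(config, session_uuid)
        finally:
            logout(session_uuid)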
def test():
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm1)

    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm2)
    assert vm1.get_vm().hostUuid != vm2.get_vm().hostUuid
  
    vm3 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm3)
    assert vm1.get_vm().hostUuid != vm3.get_vm().hostUuid
    assert vm2.get_vm().hostUuid != vm3.get_vm().hostUuid

    try:
        vm4 = None
        vm4 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    except:
        if not vm4:
            test_util.test_logger("vm4 isn't created as expected")
    finally:
        if vm4:
            test_util.test_fail("Test Fail, vm4 [uuid:%s] is not expected to be created" % vm4.get_vm().uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    global vm
    global vip_s_vm_cfg_lst

    vip_s_vm_cfg_lst = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(vip_s_vm_cfg_lst) != 1:
        test_util.test_fail('vip is running on %d host(s); expected exactly 1' % len(vip_s_vm_cfg_lst))

    test_util.test_logger("disconnect host [%s]" % (vip_s_vm_cfg_lst[0].ip_))
    #test_stub.down_host_network(vip_s_vm_cfg_lst[0].ip_, test_lib.all_scenario_config)  
    test_stub.exec_zsha2_demote(vip_s_vm_cfg_lst[0].ip_, "root", "password")

    time.sleep(5)

    expected_vip_s_vm_cfg_lst_ip = test_stub.get_expected_vip_s_vm_cfg_lst_after_switch(test_lib.all_scenario_config, test_lib.scenario_file, vip_s_vm_cfg_lst[0].ip_)
    if not test_stub.check_if_vip_is_on_host(test_lib.all_scenario_config, test_lib.scenario_file, expected_vip_s_vm_cfg_lst_ip):
        test_util.test_fail("find vip should drift on ip %s, but is not on it." %(expected_vip_s_vm_cfg_lst_ip))

    vip_s_vm_cfg_lst_new = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(vip_s_vm_cfg_lst_new) != 1:
        test_util.test_fail('vip is running on %d host(s); expected exactly 1' % len(vip_s_vm_cfg_lst_new))

    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected(exclude_host=[vip_s_vm_cfg_lst[0]])
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def detach_l3(nic_uuid, session_uuid = None):
    action = api_actions.DetachL3NetworkFromVmAction()
    action.vmNicUuid = nic_uuid
    test_util.action_logger('[Detach L3 Network Nic]: %s' % nic_uuid)
    evt = acc_ops.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[L3 Network Nic]: %s has been detached'% nic_uuid)
    return evt.inventory
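# Hedged usage sketch for detach_l3(); the nic uuid would come from a VM
# inventory, e.g.:
#
#   nic_uuid = vm.get_vm().vmNics[0].uuid
#   detach_l3(nic_uuid)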
def test():
    test_util.test_dsc('''
    Test Description:
        Will create 1 VM with 3 l3 networks. 1 l3_network is not using VR; 1 l3_network is using novlan VR; 1 l3_network is using vlan VR. 
    Resource required:
        Needs to support 3 VMs (1 test VM + 2 VR VMs) existing at the same time. 
        This test requires a special image configured with at least 3 enabled NICs (e.g. eth0, eth1, eth2).
    ''')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list = [l3_net_uuid]
    l3_name = os.environ.get('l3VlanNetworkName3')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list.append(l3_net_uuid)
    l3_name = os.environ.get('l3VlanNetworkName4')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list.append(l3_net_uuid)

    vm = test_stub.create_vm(l3_net_list, image_uuid, '3_l3_vm')
    test_obj_dict.add_vm(vm)
    vm.check()

    if len(vm.vm.vmNics) == 3:
        test_util.test_logger("Find 3 expected Nics in new created VM.")
    else:
        test_util.test_fail("New create VM doesn't not have 3 Nics. It only have %s" % len(vm.get_vm().vmNics))

    vm.destroy()
    test_util.test_pass('Create 1 VM with 3 l3_network (1 vlan VR, 1 novlan VR and 1 no VR L3network) successfully.')
def test():
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')

    image_name = os.environ.get('imageName_i_c7_z_1.9')
    #iso_path = os.environ.get('iso_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    #upgrade_script_path = os.environ.get('upgradeScript')

    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)

    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    test_util.test_logger('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    test_util.test_logger('Upgrade zstack to latest with repo') 
    #test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.update_repo(vm_ip, tmp_file)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file) 
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_mn_running(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
def test():
    h1_name = os.environ.get("hostName")
    cond = res_ops.gen_query_conditions('name', '=', h1_name)
    h1 = res_ops.query_resource(res_ops.HOST, cond)
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(host_uuid=h1[0].uuid)
    assert vm1.get_vm().hostUuid == h1[0].uuid
    test_obj_dict.add_vm(vm1)

    new_vm = vm1.clone(names=["clone-vm1", "clone-vm2", "clone-vm3"], systemtag=["affinityGroupUuid::%s" % ag1.uuid])
    test_obj_dict.add_vm(new_vm[0])
    test_obj_dict.add_vm(new_vm[1])
    test_obj_dict.add_vm(new_vm[2])
    vmuuids = []
    ag = test_lib.lib_get_affinity_group_by_name(name="ag1")
    for usage in ag.usages:
        vmuuids.append(usage.resourceUuid)
    assert new_vm[0].get_vm().uuid in vmuuids
    assert new_vm[1].get_vm().uuid in vmuuids
    assert new_vm[2].get_vm().uuid in vmuuids
    assert len(vmuuids) == 3
    
    try:
        ag_ops.add_vm_to_affinity_group(ag1.uuid, vm1.get_vm().uuid) 
    except:
        test_util.test_logger("vm1 is not expected to add into affinity group [uuid: %s]" % ag1.uuid)
    vmuuids = []
    ag = test_lib.lib_get_affinity_group_by_name(name="ag1")
    for usage in ag.usages:
        vmuuids.append(usage.resourceUuid)
    assert vm1.get_vm().uuid not in vmuuids
 
    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    if os.environ.get('ZSTACK_SIMULATOR') == "yes":
        if os.environ.get('WOODPECKER_PARALLEL') != None and os.environ.get('WOODPECKER_PARALLEL') == '0':
            destroy_initial_database()
            deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
        else:
            test_util.test_logger('Skip case setup since parallel testing')
def test_scp_vm_inbound_speed(vm_inv, bandwidth):
    '''
    bandwidth unit is KB
    '''
    timeout = TEST_TIME + 30
    vm_ip = vm_inv.vmNics[0].ip
    file_size = bandwidth * TEST_TIME
    seek_size = file_size / 1024 - 1
    cmd = 'dd if=/dev/zero of=%s bs=1M count=1 seek=%d' \
            % (test_file, seek_size)
    os.system(cmd)
    cmd = 'scp -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s %s:/dev/null' \
            % (test_file, vm_ip)
    start_time = time.time()
    if execute_shell_in_process(cmd, timeout) != 0:
        test_util.test_fail('scp test file failed')

    end_time = time.time()
    os.system('rm -f %s' % test_file)

    scp_time = end_time - start_time
    if scp_time < TEST_TIME:
        test_util.test_fail('network inbound QOS test failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not in effect.' % (scp_time, TEST_TIME, bandwidth))
    else:
        test_util.test_logger('network inbound QOS test passed, since the scp time: %d is bigger than the expected test time: %d. It means the bandwidth limitation: %d KB/s is in effect.' % (scp_time, TEST_TIME, bandwidth))

    return True
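# Worked example of the sizing math above (bandwidth is in KB/s): with
# bandwidth=1024 KB/s and TEST_TIME=60 s, file_size is 61440 KB and
# seek_size = 61440 / 1024 - 1 = 59, so the dd command writes 1 MB at a
# 59 MB offset, producing a sparse 60 MB test file. Sample values only:
sample_bandwidth, sample_test_time = 1024, 60            # KB/s, seconds (assumed)
sample_file_size = sample_bandwidth * sample_test_time   # in KB
sample_seek_size = sample_file_size / 1024 - 1           # Python 2 integer division
assert sample_seek_size == 59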
    def check(self):
        super(zstack_kvm_lbl_checker, self).check()
        self.vm_nic_uuids = self.lbl.get_vm_nics_uuid()
        self.algorithm = self.lbl.get_algorithm()
        self.vm_list = []
        self.vm_ip_test_dict = {}

        for vm_nic_uuid in self.vm_nic_uuids:
            vm = test_lib.lib_get_vm_by_nic(vm_nic_uuid)
            if vm.state == 'Running':
                nic_ip = test_lib.lib_get_nic_by_uuid(vm_nic_uuid).ip
                self.vm_ip_test_dict[nic_ip] = 0
                self.vm_list.append(vm)

        if not self.vm_list:
            test_util.test_logger('There is no living vm for the load balancer test')
            return self.judge(False)

        cond = res_ops.gen_query_conditions('listeners.uuid', '=', self.lbl_uuid)
        vip_uuid = res_ops.query_resource(res_ops.LOAD_BALANCER, cond)[0].vipUuid
        cond = res_ops.gen_query_conditions('uuid', '=', vip_uuid)
        self.vip_ip = res_ops.query_resource(res_ops.VIP, cond)[0].ip

        if not len(self.vm_list) > 1:
            self.do_so_check()
            return

        if self.algorithm == lb_header.LB_ALGORITHM_RR:
            self.do_rr_check()
        elif self.algorithm == lb_header.LB_ALGORITHM_LC:
            #self.do_lc_check()
            #If long connections are not considered, leastconn is the same as round robin.
            self.do_rr_check()
        elif self.algorithm == lb_header.LB_ALGORITHM_SO:
            self.do_so_check()
def cleanup_none_vm_volumes_violently():
    session_uuid = acc_ops.login_as_admin()
    try:
        priSto_host_list = {}
        result = res_ops.get_resource(res_ops.VOLUME, session_uuid)
        for volume in result:
            if not volume.installPath:
                continue
            volume_path = os.path.dirname(volume.installPath)
            # VM volume has been cleanup in destroy_vm_and_storage_violently()
            if not volume.hasattr("vmInstanceUuid"):
                pri_sto_uuid = volume.primaryStorageUuid
                if pri_sto_uuid in priSto_host_list:
                    host_ip = priSto_host_list[pri_sto_uuid]
                else:
                    # TODO: need to add multi hosts, if primary storage is local storage.
                    host = _get_host_from_primary_storage(pri_sto_uuid, session_uuid)
                    host_ip = host.managementIp
                    priSto_host_list[pri_sto_uuid] = host_ip
                thread = threading.Thread(target=_delete_file, args=(host_ip, volume_path))
                thread.start()

        while threading.active_count() > 1:
            time.sleep(0.1)

    except Exception as e:
        test_util.test_logger("cleanup volumes violently meet exception")
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        acc_ops.logout(session_uuid)
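# Sketch of the thread fan-out used above: one worker per volume path, then
# polling active_count() until only the main thread remains. Keeping the
# Thread objects and join()ing each one is an equivalent, more direct form:
def fan_out_and_join(worker, args_list):
    import threading
    threads = [threading.Thread(target=worker, args=args) for args in args_list]
    for t in threads:
        t.start()
    for t in threads:
        t.join()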
def get_host(content, name=None):
    from pyVmomi import vim
    host = get_obj(content, [vim.HostSystem], name=name)
    if isinstance(host, list):
        test_util.test_logger("do not find host named %s, now return all host" % name)
        return host
    return [host]
def get_vm(content, name=None):
    from pyVmomi import vim
    vm = get_obj(content, [vim.VirtualMachine], name=name)
    if isinstance(vm, list):
        test_util.test_logger("do not find vm named %s, now return all vm" % name)
        return vm
    return [vm]
def get_datacenter(content, name=None):
    from pyVmomi import vim
    dc = get_obj(content, [vim.Datacenter], name=name)
    if isinstance(dc, list):
        test_util.test_logger("do not find datacenter named %s, now return all datacenter" % name)
        return dc
    return [dc]
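# Hedged usage sketch for the pyVmomi helpers above; the connection details
# are assumptions and get_obj() is defined elsewhere in this module:
#
#   from pyVim.connect import SmartConnect
#   si = SmartConnect(host='vcenter.example.org', user='user', pwd='pass')
#   content = si.RetrieveContent()
#   all_hosts = get_host(content)              # list of every HostSystem
#   dc_list = get_datacenter(content, 'DC1')   # a single match, wrapped in a list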
    def create_checker(self, test_obj): 
        kvm_volume_checker_chain = checker_header.CheckerChain()
        checker_dict = {}
        if test_obj.state == volume_header.CREATED:
            checker_dict[db_checker.zstack_volume_db_checker] = True
            checker_dict[volume_checker.zstack_kvm_volume_file_checker] = False

        elif test_obj.state == volume_header.ATTACHED:
            checker_dict[db_checker.zstack_volume_db_checker] = True
            checker_dict[volume_checker.zstack_kvm_volume_file_checker] = True
            if not test_obj.target_vm.state == vm_header.DESTROYED:
                checker_dict[db_checker.zstack_volume_attach_db_checker] = True
                if test_obj.target_vm.state == vm_header.RUNNING:
                    checker_dict[volume_checker.zstack_kvm_volume_attach_checker] = True
            else:
                checker_dict[db_checker.zstack_volume_attach_db_checker] = False

        elif test_obj.state == volume_header.DETACHED:
            checker_dict[db_checker.zstack_volume_db_checker] = True
            checker_dict[db_checker.zstack_volume_attach_db_checker] = False
            checker_dict[volume_checker.zstack_kvm_volume_attach_checker] = False
            checker_dict[volume_checker.zstack_kvm_volume_file_checker] = True

        elif test_obj.state == volume_header.DELETED:
            test_util.test_logger('volume has been deleted: %s' % test_obj.volume.uuid)
            checker_dict[db_checker.zstack_volume_db_checker] = False
            checker_dict[volume_checker.zstack_kvm_volume_file_checker] = False

        kvm_volume_checker_chain.add_checker_dict(checker_dict, test_obj)
        return kvm_volume_checker_chain
    def check_nfs(self, volume, volume_installPath):
        host = test_lib.lib_get_volume_object_host(self.test_obj)
        if not host:
            test_util.test_logger('Check result: can not find a host belonging to the same zone as [volume uuid:] %s; can not check volume file existence' % volume.uuid)
            return self.judge(False)

        self.check_file_exist(volume, volume_installPath, host)
def dump_zstack_deployment_config(deployConfig = None):
    '''
    deployConfig is the original zstack config. We need this config to set
    username/password, as they are not available from the ZStack API

    will return an xmlobject
    '''
    if not deployConfig:
        deployConfig = xmlobject.XmlObject('fake')

    root_xml = etree.Element("deployerConfig")
    session_uuid = account_operations.login_as_admin()
    try:
        add_nodes_config(root_xml, deployConfig.nodes__, session_uuid)
        add_sftp_backup_stroage_config(root_xml, \
                deployConfig.backupStorages__, session_uuid)
        add_instance_offering_config(root_xml, session_uuid)
        add_disk_offering_config(root_xml, session_uuid)
        add_image_config(root_xml, deployConfig.images__, session_uuid)
        add_zone_config(root_xml, deployConfig.zones, session_uuid)
    except Exception as e:
        test_util.test_logger('[Error] export zstack deployment configuration meets exception.')
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        account_operations.logout(session_uuid)

    return root_xml
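# Hedged usage sketch: serializing the exported tree, assuming `etree` in
# this module is lxml.etree (matching the etree.Element call above):
#
#   root = dump_zstack_deployment_config(deploy_config)
#   with open('/tmp/exported-deploy.xml', 'w') as fd:
#       fd.write(etree.tostring(root))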
    def check(self):
        super(zstack_kvm_vm_snat_checker, self).check()
        vm = self.test_obj.vm
        test_lib.lib_install_testagent_to_vr(vm)
        host = test_lib.lib_get_vm_host(vm)

        vm_cmd_result = None
        vr_vms = test_lib.lib_find_vr_by_vm(vm)
        test_lib.lib_set_vm_host_l2_ip(vm)
        for vr_vm in vr_vms:
            test_util.test_logger("Begin to check [vm:] %s SNAT" % vm.uuid)
            nic = test_lib.lib_get_vm_nic_by_vr(vm, vr_vm)
            if 'SNAT' not in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
                test_util.test_logger("Skip [VR:] %s, since it doesn't provide SNAT service" % vr_vm.uuid)
                continue

            ping_target = test_lib.test_config.pingTestTarget.text_
            #Check if there is a SG rule to block ICMP checking
            if test_lib.lib_is_sg_rule_exist(nic.uuid, None, None, inventory.EGRESS):
                if not test_lib.lib_is_sg_rule_exist(nic.uuid, inventory.ICMP, ping_target, inventory.EGRESS):
                    test_util.test_warn('Skip SNAT checker: an ICMP Egress Rule was assigned to [nic:] %s and the allowed target ip is not %s' % (nic.uuid, ping_target))
                    return self.judge(self.exp_result)

            guest_ip = nic.ip
            vm_command = 'ping -c 5 -W 5 %s >/tmp/ping_result 2>&1; ret=$?; cat /tmp/ping_result; exit $ret' % ping_target
            vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), vm_command, self.exp_result)
            if not vm_cmd_result:
                test_util.test_logger('Checker result: FAIL to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))
                if self.exp_result == True:
                    test_util.test_logger("network connection result is not expected pass, will print VR's network configuration:")
                    test_lib.lib_print_vr_network_conf(vr_vm)
                return self.judge(False)
            else:
                test_util.test_logger('Checker result: SUCCEED to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))
                return self.judge(True)
def check_detach_l2(pre_cluster_uuid, l2_uuid, vm, is_other_cluster):
    l2 = res_ops.get_resource(res_ops.L2_NETWORK, uuid = l2_uuid)[0]

    attached_clusters = l2.attachedClusterUuids

    if pre_cluster_uuid in attached_clusters:
        test_util.test_fail('[cluster:] %s is still in [l2:] %s attached list.'\
                % (pre_cluster_uuid, l2_uuid))

    test_util.test_dsc('start vm again. vm should be started in a different cluster, if one exists.')
    if attached_clusters :
        if not is_other_cluster:
            test_util.test_fail('There should not be an available cluster for [l2:] %s, but found some.' % l2_uuid)

        vm.start()
        new_cluster_uuid = vm.get_vm().clusterUuid
        if new_cluster_uuid == pre_cluster_uuid : 
            test_util.test_fail('\
            [vm:] %s started on old [cluster:] %s, which was detached from [l2:] %s' \
                    % (vm.get_vm().uuid, new_cluster_uuid, l2_uuid))
        vm.check()
    else:
        if is_other_cluster:
            test_util.test_fail('There should be an available cluster for [l2:] %s, but found none.' % l2_uuid)
        #no cluster is attached with l2. vm will start failure.
        try:
            vm.start()
        except:
            test_util.test_logger('\
Expected: VM start failed, since no cluster is attached to [l2]: %s, \
after [cluster:] %s was detached' % (l2_uuid, pre_cluster_uuid))
        else:
            test_util.test_fail('[vm]: %s was wrongly started up, since no\
 cluster is attached to [l2]: %s, after the previous detaching ops' % \
                (vm.get_vm().uuid, l2_uuid))
def revert_vm_from_backup(group_uuid, session_uuid=None):
    action = api_actions.RevertVmFromVmBackupAction()
    action.groupUuid = group_uuid
    action.timeout = 1800000
    evt = account_operations.execute_action_with_session(action, session_uuid) 
    test_util.test_logger('Revert VM from [backup group:] %s' % group_uuid)
    return evt.inventory
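# Hedged usage sketch: the backup group uuid would come from querying the VM
# backup inventory beforehand (the accessor below is an assumption):
#
#   group_uuid = vm_backup.get_backup().groupUuid
#   revert_vm_from_backup(group_uuid)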
    def _create_checking_file(self):
        #make fs for volume, if it doesn't exist
        if not self.parent and not self.child_list:
            test_lib.lib_mkfs_for_volume(self.target_volume.get_volume().uuid, \
                    self.utility_vm.get_vm())

        import tempfile
        with tempfile.NamedTemporaryFile() as script:
            script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s
mount $device %s
mkdir -p %s
touch %s/%s
umount %s
            ''' % (test_lib.WOODPECKER_MOUNT_POINT, \
                    test_lib.WOODPECKER_MOUNT_POINT, \
                    checking_point_folder, checking_point_folder, \
                    self.checking_point, test_lib.WOODPECKER_MOUNT_POINT))
            script.flush()
            test_lib.lib_execute_shell_script_in_vm(self.utility_vm.get_vm(),
                    script.name)

        if self.parent:
            test_util.test_logger('[snapshot:] %s checking file: %s is created.\
Its [parent:] %s' % \
                    (self.snapshot_option.get_name(), \
                        self.checking_point, self.parent.get_snapshot().uuid))
        else:
            test_util.test_logger('[snapshot:] %s checking file: %s is created.'% (self.snapshot_option.get_name(), self.checking_point))
def get_cluster(content, name=None):
    from pyVmomi import vim
    cluster = get_obj(content, [vim.ClusterComputeResource], name=name)
    if isinstance(cluster, list):
        test_util.test_logger("do not find cluster named %s, now return all cluster" % name)
        return cluster
    return [cluster]
def test():
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    max_attempts = test_lib.lib_get_ha_selffencer_maxattempts()
    test_lib.lib_set_ha_selffencer_maxattempts('12')
    storagechecker_timeout = test_lib.lib_get_ha_selffencer_storagechecker_timeout()
    test_lib.lib_set_ha_selffencer_storagechecker_timeout('15')

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', os.environ.get('hostIp'), conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    cmd = "ifdown %s && sleep 180 && ifup %s" % (l2_network_interface, l2_network_interface)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    rsp = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 180)
    if rsp:
        test_util.test_logger("host may have been shut down")
    else:
        test_util.test_fail("host is expected to shut down after its network has been down for a while")

    test_util.test_logger("wait for 600 seconds")
    time.sleep(600)
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
	test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    test_lib.lib_set_ha_selffencer_maxattempts(max_attempts)
    test_lib.lib_set_ha_selffencer_storagechecker_timeout(storagechecker_timeout)

    os.system('bash -ex %s %s' % (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def test():
    vm = test_stub.create_vlan_vm()
    #test_obj_dict.add_vm(vm)

    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    for bs in backup_storage_list:
        if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
        #if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
        #    break
        #if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
        #    break
    else:
        vm.destroy()
        vm.expunge()
        test_util.test_skip('Did not find an image store type backup storage.')
    primary_storage_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for ps in primary_storage_list:
        if ps.type == "SharedBlock" or ps.type == "AliyunNAS":
            vm.destroy()
            vm.expunge()
            test_util.test_skip('The test does not support SharedBlock or AliyunNAS storage.')

    new_vms = vm.clone(vm_names)
    for new_vm in new_vms:
        test_obj_dict.add_vm(new_vm)

    if len(new_vms) != len(vm_names):
        test_util.test_fail(
            'only %s VMs have been cloned, fewer than the required %s' %
            (len(new_vms), len(vm_names)))

    for new_vm in new_vms:
        new_vm = new_vm.get_vm()
        try:
            vm_names.remove(new_vm.name)
            test_util.test_logger('VM:%s name: %s is found' %
                                  (new_vm.uuid, new_vm.name))
        except:
            test_util.test_fail('%s vm name: %s is not in list: %s' %
                                (new_vm.uuid, new_vm.name, vm_names))

    vm.destroy()
    check_imf2_cmd = "find /|grep imf|grep %s" % (
        test_lib.lib_get_root_volume_uuid(vm.get_vm()))
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    ret, output, stderr = ssh.execute(check_imf2_cmd, host.managementIp,
                                      "root", "password", False, 22)
    test_util.test_logger('expect imf2 exist: %s,%s' % (output, ret))
    if ret != 0:
        test_util.test_fail('imf2 is expected to exist')

    vm.expunge()
    ret, output, stderr = ssh.execute(check_imf2_cmd, host.managementIp,
                                      "root", "password", False, 22)
    test_util.test_logger('expect imf2 not exist: %s,%s' % (output, ret))
    if ret == 0:
        test_util.test_fail('imf2 is expected to be deleted')

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Clone VM Test Success')
def deploy_ha_env(scenarioConfig, scenarioFile, deploy_config, config_json, deploy_tool, mn_img):
    prepare_config_json(scenarioConfig, scenarioFile, deploy_config, config_json)
    mn_ha_storage_type = sce_ops.get_mn_ha_storage_type(scenarioConfig, scenarioFile, deploy_config)
    if mn_ha_storage_type == 'ceph':
        os.system('sed -i s/node/ceph-/g %s' %(config_json))
    test_host = get_mn_host(scenarioConfig,scenarioFile)[0]
    test_host_ip = test_host.ip_
    test_host_config = sce_ops.get_scenario_config_vm(test_host.name_, scenarioConfig)
    host_password = test_host_config.imagePassword_
    mn_image_path = "/home/%s/mn.qcow2" % test_host_ip
    installer_path = "/home/%s/zs-ha" % test_host_ip
    config_path = "/home/%s/config.json" % test_host_ip
    ssh.scp_file(config_json, config_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
    ssh.scp_file(mn_img, mn_image_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
    ssh.scp_file(deploy_tool, installer_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)

    cmd0 = "chmod a+x %s" % (installer_path)
    test_util.test_logger("[%s] %s" % (test_host_ip, cmd0))
    ssh.execute(cmd0, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)

    if mn_ha_storage_type == 'ceph':

        if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"], ["scenario-config-storage-separate-ceph.xml"]):
            ceph_node_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_ 
            mn_image_path = "/home/%s/mn.qcow2" % ceph_node_ip
            ssh.scp_file(mn_img, mn_image_path, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
            cmd0="yum install -y --disablerepo=* --enablerepo=zstack-local qemu-img"
            test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd0))
            ssh.execute(cmd0, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        else:
            ceph_node_ip = test_host_ip

        cmd1="ceph osd pool create zstack 128"
        test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd1))
        ssh.execute(cmd1, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)

        cmd2="qemu-img convert -f qcow2 -O raw %s rbd:zstack/mnvm.img" % mn_image_path
        test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd2))
        if test_lib.lib_execute_ssh_cmd(ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, cmd2, timeout=7200 ) == False:
            test_util.test_fail("fail to run cmd: %s on %s" %(cmd2, ceph_node_ip))

    elif mn_ha_storage_type == 'nfs':
        prepare_etc_hosts(scenarioConfig, scenarioFile, deploy_config, config_json)
        cmd1 = "cp %s /storage/mnvm.img" % (mn_image_path)
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd1))
        ssh.execute(cmd1, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)

    elif mn_ha_storage_type == 'fusionstor':
        cmd1 = "lichbd pool create zstack -p nbd"
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd1))
        ssh.execute(cmd1, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        cmd2 = "lichbd vol import %s zstack/mnvm.img -p nbd" %(mn_image_path)
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd2))
        ssh.execute(cmd2, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        cmd3 = "lich.inspect --localize /default/zstack/mnvm.img 0"
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd3))
        ssh.execute(cmd3, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)

    cmd3='%s install -p %s -c %s' % (installer_path, host_password, config_path)
    test_util.test_logger("[%s] %s" % (test_host_ip, cmd3))
    #ssh.execute(cmd3, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
    if test_lib.lib_execute_ssh_cmd(test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, cmd3, timeout=3600 ) == False:
        test_util.test_fail("fail to run cmd: %s on %s" %(cmd3, test_host_ip))
def get_host_by_mn_vm(scenarioConfig, scenarioFile):
    # hosts that currently run the ZStack Management Node VM
    mn_host_list = get_mn_host(scenarioConfig, scenarioFile)
    host_list = []
    for host in mn_host_list:
        host_config = sce_ops.get_scenario_config_vm(host.name_, scenarioConfig)
        cmd = "virsh list | grep -v paused | grep \"ZStack Management Node VM\""
        try:
            if sce_is_sep_pub():
                vm_list = test_lib.lib_execute_ssh_cmd(host.managementIp_, host_config.imageUsername_, host_config.imagePassword_,cmd)
            else:
                vm_list = test_lib.lib_execute_ssh_cmd(host.ip_, host_config.imageUsername_, host_config.imagePassword_,cmd)
            if vm_list:
                host_list.append(host)
        except Exception as e:
            test_util.test_logger("@@get host exception@@:%s" %(str(e)))
            continue

    test_util.test_logger("@@DEBUG@@: host_list=<%s>" %(str(host_list)))
    return host_list

def get_mn_host(scenarioConfig, scenarioFile):
    mn_host_list = []

    test_util.test_logger("@@DEBUG@@:<scenarioConfig:%s><scenarioFile:%s><scenarioFile is existed: %s>" \
                          %(str(scenarioConfig), str(scenarioFile), str(os.path.exists(scenarioFile))))
    if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
        return mn_host_list

    test_util.test_logger("@@DEBUG@@: after config file exist check")
    for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            if xmlobject.has_element(vm, 'mnHostRef'):
                with open(scenarioFile, 'r') as fd:
def test():
    global vm
    global mn_host

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger(
        "shutdown host's network [%s] that mn vm is running on" %
        (mn_host[0].ip_))
    test_stub.shutdown_host_network(mn_host[0],
                                    test_lib.all_scenario_config,
                                    downMagt=True)
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on another host"
    )
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or (new_mn_host_ip != mn_host[0].ip_ and
                                new_mn_host_ip != mn_host[0].managementIp_):
        test_util.test_fail(
            "management network is down; mn host should not have changed. Expected on [%s] while it is on [%s]"
            % (mn_host[0].ip_, new_mn_host_ip))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after its former host went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host has been down for 120s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    try:
        node_ops.wait_for_management_server_start()
    except:
        test_util.test_fail(
            "management node does not recover after its former host's network down"
        )

    test_stub.reopen_host_network(mn_host[0], test_lib.all_scenario_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config,
                                   test_lib.scenario_file)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=True)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    image_name = os.environ.get('imageName_i_c7_z_1.3')
    iso_path = os.environ.get('iso_path')
    iso_19_path = os.environ.get('iso_19_path')
    iso_10_path = os.environ.get('iso_10_path')
    iso_20_path = os.environ.get('iso_20_path')
    iso_21_path = os.environ.get('iso_21_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    upgrade_script_path = os.environ.get('upgradeScript')

    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)

    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    test_util.test_dsc('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_hosts(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.reset_rabbitmq_for_13(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path)

    #pkg_num = 1.4
    release_ver = [
        '1.4', '1.5', '1.6', '1.7', '1.8', '1.9', '1.10', '2.0.0', '2.1.0',
        '2.2.0'
    ]
    curren_num = float(os.environ.get('releasePkgNum'))
    #while pkg_num <= curren_num:
    for pkg_num in release_ver:
        test_util.test_logger('Upgrade zstack to %s' % pkg_num)
        #if str(pkg_num) == '1.7':
        #    test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path)
        if str(pkg_num) == '1.10':
            test_stub.update_10_iso(vm_ip, tmp_file, iso_10_path,
                                    upgrade_script_path)
        if str(pkg_num) == '2.0.0':
            test_stub.update_20_iso(vm_ip, tmp_file, iso_20_path,
                                    upgrade_script_path)
        if str(pkg_num) == '2.1.0':
            test_stub.update_21_iso(vm_ip, tmp_file, iso_21_path,
                                    upgrade_script_path)
        upgrade_pkg = os.environ.get('zstackPkg_%s' % pkg_num)
        test_stub.upgrade_zstack(vm_ip, upgrade_pkg, tmp_file)
        test_stub.start_mn(vm_ip, tmp_file)
        test_stub.check_zstack_version(vm_ip, tmp_file, str(pkg_num))
        #test_stub.check_installation(vm_ip, tmp_file)
        #pkg_num = pkg_num + 0.1

    test_util.test_dsc('Upgrade zstack to latest')

    test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.check_installation(vm_ip, tmp_file)

    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
def test():
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout

    must_ps_list = [inventory.LOCAL_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid,
                                      vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    test_stub.ensure_all_vrs_on_host(host_uuid)
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #target_host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    #for vr in vrs:
    #    if test_lib.lib_find_host_by_vr(vr).managementIp != test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp:
    #        vm_ops.migrate_vm(vr.uuid, target_host_uuid)

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config,
                                        test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %
                            (host_ip))

    test_stub.stop_host(test_host, test_lib.all_scenario_config)

    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    for i in range(0, 240):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Stopped":
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host,
                                        test_lib.all_scenario_config,
                                        test_lib.deploy_config)
            conditions = res_ops.gen_query_conditions('managementIp', '=',
                                                      host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST,
                                                   conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 240
    for i in range(vm_stop_time, 240):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Starting":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not changed to the Starting state as expected within 240s.")

    vm.destroy()

    test_util.test_pass(
        'Test checking VM ha and none status when force stop vm Success.')
def test():
    def test_fail(msg):
        os.system('rm -f %s' % tmp_file)
        test_util.test_fail(msg)

    test_util.test_dsc('Create 2 CentOS7 vms to test zstack installation.')
    image_name = os.environ.get('imageName_i_c7')
    vm1 = test_stub.create_vlan_vm(image_name)
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vlan_vm(image_name)
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()

    vm1_inv = vm1.get_vm()
    vm1_ip = vm1_inv.vmNics[0].ip
    vm2_inv = vm2.get_vm()
    vm2_ip = vm2_inv.vmNics[0].ip
    target_file = '/root/zstack-all-in-one.tgz'
    test_stub.prepare_test_env(vm1_inv, target_file)
    ssh_cmd1 = 'ssh  -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm1_ip
    test_stub.only_install_zstack(ssh_cmd1, target_file, tmp_file)

    test_stub.copy_id_dsa(vm1_inv, ssh_cmd1, tmp_file)
    test_stub.copy_id_dsa_pub(vm1_inv)
    test_stub.copy_id_dsa_pub(vm2_inv)

    cmd = '%s "zstack-ctl install_db --host=%s"' % (ssh_cmd1, vm2_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack install db failed in vm:%s' % vm2_inv.uuid)

    cmd = '%s "zstack-ctl deploydb --host=%s"' % (ssh_cmd1, vm2_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack deploy db failed in vm:%s' % vm2_inv.uuid)

    cmd = '%s "zstack-ctl install_rabbitmq --host=%s"' % (ssh_cmd1, vm1_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack install rabbitmq failed in vm:%s' % vm1_inv.uuid)

    cmd = '%s "zstack-ctl install_management_node --host=%s"' % (ssh_cmd1,
                                                                 vm2_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack install mn failed in vm:%s' % vm2_inv.uuid)

    cmd = '%s "zstack-ctl install_ui --host=%s"' % (ssh_cmd1, vm2_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack install ui failed in vm:%s' % vm2_inv.uuid)

    cmd = '%s "zstack-ctl install_ui --host=%s"' % (ssh_cmd1, vm1_ip)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_fail('zstack install ui failed in vm:%s' % vm1_inv.uuid)

    cmd = '%s "zstack-ctl start_node"' % ssh_cmd1
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
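        # start_node can report a timeout before the node is actually ready;
        # poll "zstack-ctl status" for up to ~300s before declaring failure.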
        if 'no management-node-ready message received within' in open(
                tmp_file).read():
            times = 30
            cmd = '%s "zstack-ctl status"' % ssh_cmd1
            while (times > 0):
                time.sleep(10)
                process_result = test_stub.execute_shell_in_process(
                    cmd, tmp_file, 10, True)
                times -= 1
                if process_result == 0:
                    test_util.test_logger(
                        "management node started after an extra %d seconds" %
                        ((30 - times) * 10))
                    break
            else:
                test_fail('start node failed in vm:%s' % vm1_inv.uuid)

    test_stub.check_installation(ssh_cmd1, tmp_file)

    os.system('rm -f %s' % tmp_file)
    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    test_util.test_pass(
        'ZStack multi nodes installation Test Success on 2 CentOS7.')
def test():
    global vm
    global mn_host
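    # 10 rounds: destroy the MN VM, verify consul restarts it on the same host,
    # wait for the management node to recover, then create/destroy a test VM.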
    for i in range(0, 10):
        test_util.test_logger("destroy mn vm round %s" % (i))

        mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                              test_lib.scenario_file)
        if len(mn_host) != 1:
            test_util.test_fail('MN VM is running on %d host(s)' %
                                len(mn_host))
        test_util.test_logger("destroy mn vm on host [%s]" % mn_host[0].ip_)
        test_stub.destroy_mn_vm(mn_host[0], test_lib.all_scenario_config)
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 0:
            test_util.test_logger("mn vm was destroyed successfully")
        else:
            test_util.test_fail("mn vm was not destroyed successfully")

        test_util.test_logger(
            "wait for 20 seconds to see if management node VM starts on one host"
        )
        time.sleep(20)

        new_mn_host_ip = test_stub.get_host_by_consul_leader(
            test_lib.all_scenario_config, test_lib.scenario_file)
        if new_mn_host_ip == "" or new_mn_host_ip != mn_host[0].ip_:
            test_util.test_fail(
                "management node VM did not come back on its former host [%s] within 20s of being destroyed; consul leader reports [%s]"
                % (mn_host[0].ip_, new_mn_host_ip))

        count = 60
        while count > 0:
            new_mn_host = test_stub.get_host_by_mn_vm(
                test_lib.all_scenario_config, test_lib.scenario_file)
            if len(new_mn_host) == 1:
                test_util.test_logger(
                    "management node VM is running again after being destroyed"
                )
                break
            elif len(new_mn_host) > 1:
                test_util.test_fail(
                    "management node VM runs on more than one host after being destroyed"
                )
            time.sleep(5)
            count -= 1

        if len(new_mn_host) == 0:
            test_util.test_fail(
                "management node VM did not come back within 300s of being destroyed"
            )
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after being destroyed"
            )

        test_util.test_logger(
            "wait for 5 minutes to see if management node starts again")
        try:
            node_ops.wait_for_management_server_start(300)
        except:
            test_util.test_fail(
                "management node does not recover after mn vm was destroyed")

        test_util.test_logger("try to create vm, timeout is 30s")
        time_out = 30
        while time_out > 0:
            try:
                vm = test_stub.create_basic_vm()
                break
            except:
                time.sleep(1)
                time_out -= 1
        if time_out == 0:
            test_util.test_fail('Fail to create vm after mn is ready')

        vm.check()
        vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    test_stub.skip_if_not_storage_network_separate(
        test_lib.all_scenario_config)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
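    # Pick an Enabled/Connected host that is not the management node, so taking it down cannot kill the MN.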
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid,
                                      vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    test_stub.ensure_all_vrs_on_host(host_uuid)

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    #test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
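    # Assumed: "zsn1" is the separated storage-network NIC; the helper takes it
    # down and brings it back up over ~300s in a background thread.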
    t = test_stub.async_exec_ifconfig_nic_down_up(300, host_ip, host_username,
                                                  host_password, "zsn1")

    time.sleep(300)

    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()

    if test_lib.lib_get_vm_last_host(vm.get_vm()).managementIp != host_ip:
        test_util.test_fail(
            "Migrated VM's last host is expected to be the original host [ip:%s]" %
            (host_ip))

    vm.destroy()

    t.join()

    test_util.test_pass('Test VM ha change to running within 300s Success')
def setup_fusionstor_storages(scenario_config, scenario_file, deploy_config):
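    # Map each fusionstor storage name to the scenario VMs that back it, then
    # push the setup script and package to the first node and run it there.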
    fusionstor_storages = dict()
    for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            vm_name = vm.name_

            if hasattr(vm, 'backupStorageRef'):
                for backupStorageRef in xmlobject.safe_list(
                        vm.backupStorageRef):
                    print backupStorageRef.text_
                    if backupStorageRef.type_ == 'fusionstor':
                        if fusionstor_storages.has_key(backupStorageRef.text_):
                            if vm_name in fusionstor_storages[
                                    backupStorageRef.text_]:
                                continue
                            else:
                                fusionstor_storages[
                                    backupStorageRef.text_].append(vm_name)
                        else:
                            fusionstor_storages[backupStorageRef.text_] = [
                                vm_name
                            ]
            if hasattr(vm, 'primaryStorageRef'):
                for primaryStorageRef in xmlobject.safe_list(
                        vm.primaryStorageRef):
                    print primaryStorageRef.text_
                    for zone in xmlobject.safe_list(deploy_config.zones.zone):
                        if primaryStorageRef.type_ == 'fusionstor':
                            if fusionstor_storages.has_key(
                                    primaryStorageRef.text_):
                                if vm_name in fusionstor_storages[
                                        primaryStorageRef.text_]:
                                    continue
                                else:
                                    fusionstor_storages[
                                        primaryStorageRef.text_].append(vm_name)
                            else:
                                fusionstor_storages[primaryStorageRef.text_] = [
                                    vm_name
                                ]
    if len(fusionstor_storages) > 0:
        test_util.test_logger('get fusionstor pkg')
        fusionstorPkg = os.environ['fusionstorPkg']
    else:
        test_util.test_logger('no fusionstor pkg return here')
        return

    for fusionstor_storage in fusionstor_storages:
        test_util.test_logger('setup fusionstor [%s] service.' %
                              (fusionstor_storage))
        node1_name = fusionstor_storages[fusionstor_storage][0]
        node1_config = get_scenario_config_vm(node1_name, scenario_config)
        node1_ip = get_scenario_file_vm(node1_name, scenario_file).ip_
        node_host = get_deploy_host(node1_config.hostRef.text_, deploy_config)
        if not hasattr(node_host, 'port_') or node_host.port_ == '22':
            node_host.port_ = '22'
        vm_ips = ''
        for fusionstor_node in fusionstor_storages[fusionstor_storage]:
            vm_nic_id = get_fusionstor_storages_nic_id(fusionstor_storage,
                                                       scenario_config)
            vm = get_scenario_file_vm(fusionstor_node, scenario_file)
            if vm_nic_id is None:
                vm_ips += vm.ip_ + ' '
            else:
                vm_ips += vm.ips.ip[vm_nic_id].ip_ + ' '

        ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'),
                                '/tools/setup_fusionstor_nodes.sh'),
                     '/tmp/setup_fusionstor_nodes.sh',
                     node1_ip,
                     node1_config.imageUsername_,
                     node1_config.imagePassword_,
                     port=int(node_host.port_))
        ssh.scp_file(fusionstorPkg,
                     fusionstorPkg,
                     node1_ip,
                     node1_config.imageUsername_,
                     node1_config.imagePassword_,
                     port=int(node_host.port_))
        cmd = "bash -ex /tmp/setup_fusionstor_nodes.sh %s %s" % (
            (fusionstorPkg), (vm_ips))
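        # The first attempt's failure is only logged; the command is then retried once unconditionally.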
        try:
            ssh.execute(cmd, node1_ip, node1_config.imageUsername_,
                        node1_config.imagePassword_, True,
                        int(node_host.port_))
        except Exception as e:
            print str(e)
        ssh.execute(cmd, node1_ip, node1_config.imageUsername_,
                    node1_config.imagePassword_, True, int(node_host.port_))
def test():
    global session_to
    global session_mc

    session_to = con_ops.change_global_config('identity', 'session.timeout',
                                              '720000')
    session_mc = con_ops.change_global_config('identity',
                                              'session.maxConcurrent', '10000')
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    #use root volume to skip add_checking_point
    test_util.test_dsc('Use root volume for snapshot testing')
    root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume = zstack_volume_header.ZstackTestVolume()
    root_volume.set_volume(root_volume_inv)
    root_volume.set_state(volume_header.ATTACHED)
    root_volume.set_target_vm(vm)
    test_obj_dict.add_volume(root_volume)
    vm.check()

    snapshots = test_obj_dict.get_volume_snapshot(
        root_volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)

    ori_num = 100
    index = 1
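    # Spawn ori_num snapshot-creation threads against the same root volume, then wait for all of them to finish.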
    while index <= ori_num:
        thread = threading.Thread(target=snapshots.create_snapshot,
                                  args=('create_snapshot%s' % str(index), ))
        thread.start()
        index += 1

    while threading.activeCount() > 1:
        time.sleep(0.1)

    cond = res_ops.gen_query_conditions('volumeUuid', '=',
                                        root_volume.get_volume().uuid)
    sps_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond)

    if sps_num != ori_num:
        test_util.test_fail(
            'Created %d snapshots, but only %d were successfully created'
            % (ori_num, sps_num))

    test_num = 100
    snapshot_list = snapshots.get_snapshot_list()
    for index in range(test_num):
        thread_1 = threading.Thread(target=snapshots.delete_snapshot,
                                    args=(random.choice(snapshot_list), ))
        thread_2 = threading.Thread(target=snapshots.use_snapshot,
                                    args=(random.choice(snapshot_list), ))
        thread_1.start()
        thread_2.start()

    while threading.activeCount() > 1:
        time.sleep(0.1)

    #snapshot.check() doesn't work for root volume
    #snapshots.check()
    #check if snapshot exists in ps (host) install_path
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE or ps.type == inventory.LOCAL_STORAGE_TYPE:
        cond = res_ops.gen_query_conditions('volumeUuid', '=',
                                            root_volume.get_volume().uuid)
        sps_in_database = res_ops.query_resource(res_ops.VOLUME_SNAPSHOT, cond)
        host = test_lib.lib_get_vm_host(vm.get_vm())
        for snapshot_inv in sps_in_database:
            sp_ps_install_path = snapshot_inv.primaryStorageInstallPath
            if test_lib.lib_check_file_exist(host, sp_ps_install_path):
                test_util.test_logger(
                    'Check result: snapshot %s is found in host %s in path %s'
                    %
                    (snapshot_inv.name, host.managementIp, sp_ps_install_path))
            else:
                test_lib.lib_robot_cleanup(test_obj_dict)
                test_util.test_fail(
                    'Check result: snapshot %s is not found in host %s in path %s'
                    %
                    (snapshot_inv.name, host.managementIp, sp_ps_install_path))
    else:
        test_util.test_logger(
            'Skip check file install path for %s primary storage' % (ps.type))

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass(
        'Test delete and revert 100 snapshots simultaneously success')
def setup_host_vm(vm_inv, vm_config, deploy_config):
    vm_ip = test_lib.lib_get_vm_nic_by_l3(vm_inv,
                                          vm_inv.defaultL3NetworkUuid).ip
    cmd = 'hostnamectl set-hostname %s' % (vm_ip.replace('.', '-'))
    ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_,
                True, 22)

    udev_config = ''
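    # Build one udev rule per NIC, binding each MAC address to a deterministic interface name.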
    nic_id = 0
    for l3network in xmlobject.safe_list(vm_config.l3Networks.l3Network):
        for vmnic in vm_inv.vmNics:
            if vmnic.l3NetworkUuid == l3network.uuid_:
                vmnic_mac = vmnic.mac
                break
        nic_name = None
        if hasattr(l3network, 'l2NetworkRef'):
            for l2networkref in xmlobject.safe_list(l3network.l2NetworkRef):
                nic_name = get_ref_l2_nic_name(l2networkref.text_,
                                               deploy_config)
                if nic_name.find('.') < 0:
                    break
        if nic_name == None:
            nic_name = "eth%s" % (nic_id)
        nic_id += 1

        udev_config = udev_config + '\\nACTION=="add", SUBSYSTEM=="net", DRIVERS=="?*", ATTR{type}=="1", ATTR{address}=="%s", NAME="%s"' % (
            vmnic_mac, nic_name)

    cmd = 'echo %s > /etc/udev/rules.d/70-persistent-net.rules' % (udev_config)
    ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_,
                True, 22)

    for l3network in xmlobject.safe_list(vm_config.l3Networks.l3Network):
        if hasattr(l3network, 'l2NetworkRef'):
            for l2networkref in xmlobject.safe_list(l3network.l2NetworkRef):
                nic_name = get_ref_l2_nic_name(l2networkref.text_,
                                               deploy_config)
                if nic_name.find('.') >= 0:
                    vlan = nic_name.split('.')[1]
                    test_util.test_logger('[vm:] %s %s is created.' %
                                          (vm_ip, nic_name))
                    cmd = 'vconfig add %s %s' % (nic_name.split('.')[0], vlan)
                    ssh.execute(cmd, vm_ip, vm_config.imageUsername_,
                                vm_config.imagePassword_, True, 22)

    host = get_deploy_host(vm_config.hostRef.text_, deploy_config)
    if hasattr(host, 'port_') and host.port_ != '22':
        cmd = "sed -i 's/#Port 22/Port %s/g' /etc/ssh/sshd_config && iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service sshd restart" % (
            host.port_, host.port_)
        ssh.execute(cmd, vm_ip, vm_config.imageUsername_,
                    vm_config.imagePassword_, True, 22)
    else:
        host.port_ = '22'

    if host.username_ != 'root':
        cmd = 'adduser %s && echo -e %s\\\\n%s | passwd %s' % (
            host.username_, host.password_, host.password_, host.username_)
        ssh.execute(cmd, vm_ip, vm_config.imageUsername_,
                    vm_config.imagePassword_, True, int(host.port_))
        cmd = "echo '%s        ALL=(ALL)       NOPASSWD: ALL' >> /etc/sudoers" % (
            host.username_)
        ssh.execute(cmd, vm_ip, vm_config.imageUsername_,
                    vm_config.imagePassword_, True, int(host.port_))
def setup_ocfs2smp_primary_storages(scenario_config, scenario_file,
                                    deploy_config, vm_inv_lst, vm_cfg_lst):
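    # Collect the scenario VMs backing each ocfs2smp primary storage; an
    # optional url_ attribute supplies the SMP mount source (passed as SMP_URL).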
    ocfs2_storages = dict()
    smp_url = None
    for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            vm_name = vm.name_
            if hasattr(vm, 'primaryStorageRef'):
                for primaryStorageRef in xmlobject.safe_list(
                        vm.primaryStorageRef):
                    for zone in xmlobject.safe_list(deploy_config.zones.zone):
                        if primaryStorageRef.type_ == 'ocfs2smp':
                            if ocfs2_storages.has_key(primaryStorageRef.text_):
                                if vm_name in ocfs2_storages[
                                        primaryStorageRef.text_]:
                                    continue
                                else:
                                    ocfs2_storages[primaryStorageRef.
                                                   text_].append(vm_name)
                            else:
                                ocfs2_storages[primaryStorageRef.text_] = [
                                    vm_name
                                ]
                                if hasattr(primaryStorageRef, 'url_'):
                                    smp_url = primaryStorageRef.url_

    for ocfs2_storage in ocfs2_storages:
        test_util.test_logger('setup ocfs2 [%s] service.' % (ocfs2_storage))
        node1_name = ocfs2_storages[ocfs2_storage][0]
        node1_config = get_scenario_config_vm(node1_name, scenario_config)
        #node1_ip = get_scenario_file_vm(node1_name, scenario_file).ip_
        node_host = get_deploy_host(node1_config.hostRef.text_, deploy_config)
        if not hasattr(node_host, 'port_') or node_host.port_ == '22':
            node_host.port_ = '22'

        vm_ips = ''
        for ocfs2_node in ocfs2_storages[ocfs2_storage]:
            vm_nic_id = get_ceph_storages_nic_id(ocfs2_storage,
                                                 scenario_config)
            vm = get_scenario_file_vm(ocfs2_node, scenario_file)
            if vm_nic_id is None:
                vm_ips += vm.ip_ + ' '
            else:
                vm_ips += vm.ips.ip[vm_nic_id].ip_ + ' '
        #ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'), '/tools/setup_ocfs2.sh'), '/tmp/setup_ocfs2.sh', node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, port=int(node_host.port_))
        import commands
        status, woodpecker_ip = commands.getstatusoutput(
            "ip addr show eth0 | sed -n '3p' | awk '{print $2}' | awk -F / '{print $1}'"
        )
        if smp_url:
            cmd = "SMP_URL=%s bash %s/%s %s" % (
                smp_url, os.environ.get('woodpecker_root_path'),
                '/tools/setup_ocfs2.sh', vm_ips)
        else:
            cmd = "bash %s/%s %s" % (os.environ.get('woodpecker_root_path'),
                                     '/tools/setup_ocfs2.sh', vm_ips)

        ssh.execute(cmd, woodpecker_ip, node1_config.imageUsername_,
                    node1_config.imagePassword_, True, int(node_host.port_))

    if ocfs2_storages:
        for vm_inv, vm_config in zip(vm_inv_lst, vm_cfg_lst):
            recover_after_host_vm_reboot(vm_inv, vm_config, deploy_config)
def env_recover():
    test_util.test_logger("recover consul on host: %s" % (mn_host[0].ip_))
    test_stub.start_consul(mn_host[0], test_lib.all_scenario_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
def setup_ceph_storages(scenario_config, scenario_file, deploy_config):
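    # Map each ceph storage name to the scenario VMs that back it, then run the node setup script from the first node.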
    ceph_storages = dict()
    for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            vm_name = vm.name_

            if hasattr(vm, 'backupStorageRef'):
                for backupStorageRef in xmlobject.safe_list(
                        vm.backupStorageRef):
                    print backupStorageRef.text_
                    if backupStorageRef.type_ == 'ceph':
                        if ceph_storages.has_key(backupStorageRef.text_):
                            if vm_name in ceph_storages[
                                    backupStorageRef.text_]:
                                continue
                            else:
                                ceph_storages[backupStorageRef.text_].append(
                                    vm_name)
                        else:
                            ceph_storages[backupStorageRef.text_] = [vm_name]

            if hasattr(vm, 'primaryStorageRef'):
                for primaryStorageRef in xmlobject.safe_list(
                        vm.primaryStorageRef):
                    print primaryStorageRef.text_
                    for zone in xmlobject.safe_list(deploy_config.zones.zone):
                        if primaryStorageRef.type_ == 'ceph':
                            if ceph_storages.has_key(primaryStorageRef.text_):
                                if vm_name in ceph_storages[
                                        primaryStorageRef.text_]:
                                    continue
                                else:
                                    ceph_storages[
                                        primaryStorageRef.text_].append(vm_name)
                            else:
                                ceph_storages[primaryStorageRef.text_] = [
                                    vm_name
                                ]

    for ceph_storage in ceph_storages:
        test_util.test_logger('setup ceph [%s] service.' % (ceph_storage))
        node1_name = ceph_storages[ceph_storage][0]
        node1_config = get_scenario_config_vm(node1_name, scenario_config)
        node1_ip = get_scenario_file_vm(node1_name, scenario_file).ip_
        node_host = get_deploy_host(node1_config.hostRef.text_, deploy_config)
        if not hasattr(node_host, 'port_') or node_host.port_ == '22':
            node_host.port_ = '22'

        vm_ips = ''
        for ceph_node in ceph_storages[ceph_storage]:
            vm_nic_id = get_ceph_storages_nic_id(ceph_storage, scenario_config)
            vm = get_scenario_file_vm(ceph_node, scenario_file)
            if vm_nic_id is None:
                vm_ips += vm.ip_ + ' '
            else:
                vm_ips += vm.ips.ip[vm_nic_id].ip_ + ' '
        ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'),
                                '/tools/setup_ceph_nodes.sh'),
                     '/tmp/setup_ceph_nodes.sh',
                     node1_ip,
                     node1_config.imageUsername_,
                     node1_config.imagePassword_,
                     port=int(node_host.port_))
        cmd = "bash -ex /tmp/setup_ceph_nodes.sh %s" % (vm_ips)
        ssh.execute(cmd, node1_ip, node1_config.imageUsername_,
                    node1_config.imagePassword_, True, int(node_host.port_))
def test():
    global original_rate
    global new_offering_uuid
    test_util.test_dsc('Test memory allocation and reclaiming.')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit = 1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    host = host[0]
    over_provision_rate = 1
    target_vm_num = 5

    host_res = test_lib.lib_get_cpu_memory_capacity(host_uuids = [host.uuid])
    #avail_mem = host_res.availableMemory * over_provision_rate
    avail_mem = host_res.availableMemory
    if avail_mem <= 1024*1024*1024:
        test_util.test_skip('Available memory is less than 1024MB, skip test.')
        return True

    original_rate = test_lib.lib_set_provision_memory_rate(over_provision_rate)
    host_res = test_lib.lib_get_cpu_memory_capacity(host_uuids = [host.uuid])
    avail_mem = host_res.availableMemory

    test_mem = avail_mem / target_vm_num
    new_offering_mem = test_mem
    new_offering = test_lib.lib_create_instance_offering(memorySize = new_offering_mem)

    new_offering_uuid = new_offering.uuid

    rounds = 0
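    # Each round creates target_vm_num VMs in parallel that together consume the
    # available memory, destroys them, and verifies availableMemory is fully reclaimed.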
    while (rounds < 3):
        times = 1
        while (times <= (target_vm_num)):
            thread = threading.Thread(target = parallelly_create_vm, \
                    args = ('parallel_vm_creating_%d' % times, \
                        host.uuid, \
                        new_offering.uuid, ))
            thread.start()

            times += 1

        times = 1
        print 'Running VM: %s ' % len(test_obj_dict.get_vm_list())
        while threading.active_count() > 1:
            check_thread_exception()
            time.sleep(1)
            if times > 5:
                test_util.test_fail('vm creation did not finish within 5s')
            times += 1

        check_thread_exception()

        for vm in test_obj_dict.get_all_vm_list():
            try:
                vm.destroy()
                test_obj_dict.rm_vm(vm)
            except Exception as e:
                test_util.test_logger("VM Destroying Failure in memory reclaiming test. :%s " % e)
                raise e

        host_res2 = test_lib.lib_get_cpu_memory_capacity(host_uuids = [host.uuid])
        avail_mem2 = host_res2.availableMemory
        if avail_mem2 != avail_mem:
            test_util.test_fail('Available memory was not reclaimed correctly. Current available memory: %d, original available memory: %d, after creating and destroying %d vms in round %d' % (avail_mem2, avail_mem, target_vm_num, rounds))

        rounds += 1
    
    test_lib.lib_set_provision_memory_rate(original_rate)
    vm_ops.delete_instance_offering(new_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)

    test_util.test_pass('Parallel vm creation Test Pass')
def test():
    global vm
    global default_mode
    #    default_mode = conf_ops.get_global_config_value('kvm', 'videoType')
    default_mode = conf_ops.change_global_config('vm', 'videoType', 'qxl')
    vm = test_stub.create_sg_vm()
    console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid)
    test_util.test_logger('[vm:] %s console is on %s:%s' %
                          (vm.get_vm().uuid, console.hostIp, console.port))
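    # VNC display number is the console port minus the standard VNC base port 5900.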
    display = str(int(console.port) - 5900)
    vm.check()
    vm_mode = test_lib.lib_get_vm_video_type(vm.get_vm())
    if vm_mode != 'qxl':
        test_util.test_fail(
            'VM is expected to work in qxl mode instead of %s' % (vm_mode))
    client = api.connect(console.hostIp + ":" + display)
    client.captureScreen('tmp.png')
    image = Image.open('tmp.png')
    if image.width != 720 or image.height != 400:
        test_util.test_fail(
            "VM is expected to work in 720x400 while its %sx%s" %
            (image.width, image.height))
    box = image.getbbox()
    if box != (0, 18, 403, 79) and box != (0, 18, 403, 80):
        test_util.test_fail(
            "VM is expected to display text in area (0, 18, 403, 79) while it's actually: (%s, %s, %s, %s)"
            % (box[0], box[1], box[2], box[3]))

    test_util.test_logger(
        '[vm:] change vga mode to vga=794 which is 1280x1024')
    cmd = 'sed -i "s/115200$/115200 vga=794/g" /boot/grub2/grub.cfg'
    test_lib.lib_execute_command_in_vm(vm.get_vm(), cmd)
    vm.reboot()
    vm.check()
    client = api.connect(console.hostIp + ":" + display)
    client.captureScreen('tmp.png')
    image = Image.open('tmp.png')
    if image.width != 1280 or image.height != 1024:
        test_util.test_fail(
            "VM is expected to work in 1280x1024 while its %sx%s" %
            (image.width, image.height))
    box = image.getbbox()
    if box != (0, 18, 359, 79) and box != (0, 18, 359, 80):
        test_util.test_fail(
            "VM is expected to display text in area (0, 18, 359, 79) while it's actually: (%s, %s, %s, %s)"
            % (box[0], box[1], box[2], box[3]))

    test_util.test_logger(
        '[vm:] change vga mode to vga=907 which is 2560x1600')
    cmd = 'sed -i "s/vga=794/vga=907/g" /boot/grub2/grub.cfg'
    test_lib.lib_execute_command_in_vm(vm.get_vm(), cmd)
    vm.reboot()
    vm.check()
    client = api.connect(console.hostIp + ":" + display)
    client.captureScreen('tmp.png')
    image = Image.open('tmp.png')
    if image.width != 2560 or image.height != 1600:
        test_util.test_fail(
            "VM is expected to work in 2560x1600 while its %sx%s" %
            (image.width, image.height))
    box = image.getbbox()
    if box != (0, 18, 359, 79) and box != (0, 18, 359, 80):
        test_util.test_fail(
            "VM is expected to display text in area (0, 18, 359, 79) while it's actually: (%s, %s, %s, %s)"
            % (box[0], box[1], box[2], box[3]))

    vm.destroy()
    vm.check()
    conf_ops.change_global_config('vm', 'videoType', default_mode)
    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()
    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)
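    # Pick a majority of the MN hosts at random ((n + 1) / 2, integer division); only these are recovered below.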

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config, downMagt=False)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail('MN VM was killed unexpectedly; it should not be affected when only the separated network is down.')

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 120s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global test_mn_host_list

    test_util.test_skip("2 hosts down at the same time is not support")
    test_stub.skip_if_scenario_is_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    for index in test_mn_host_list:
        test_util.test_logger("force stop host [%s]" %
                              (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config,
                            'cold')

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after its former host went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not come back within 300s after its former host went down"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail(
            "management node does not recover after recover one mn host")

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file,
                                                  test_lib.all_scenario_config,
                                                  test_lib.deploy_config)
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global test_obj_dict

    ova_image_name = os.environ['vcenterDefaultmplate']
    network_pattern1 = os.environ['l3vCenterNoVlanNetworkName']
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('largeDiskOfferingName'))
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vm_in_vcenter(vm_name='test_volume_after_sync_vm',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid
    vc_ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    vc_host = test_lib.lib_find_host_by_vm(vm.vm).managementIp

    test_util.test_dsc('Create volumes and check')
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    volume.check()
    volume.detach()
    volume.check()

    volume_creation_option.set_name('vcenter_volume1')
    volume_creation_option.set_primary_storage_uuid(ps_uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume1.attach(vm)
    volume1.check()
    volume1.delete()
    volume1.check()

    test_util.test_dsc('Sync vcenter')
    vcenter_uuid = vct_ops.lib_get_vcenter_by_name(os.environ['vcenter']).uuid
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)

    test_util.test_dsc('check volumes after synchronizing vcenter')
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    db_volume1 = test_lib.lib_get_volume_by_uuid(volume1.get_volume().uuid)
    if db_volume.status != 'Ready' or db_volume1.status != 'Deleted':
        test_util.test_fail(
            "data volume status check failed after synchronizing vcenter")

    #delete the volume file directly on the vCenter host, then sync again to verify ZStack notices the removal
    volume_installPath = vc_ps.url.split(
        '//')[1] + db_volume.installPath.split('[' + vc_ps.name +
                                               ']')[1].lstrip()
    test_util.test_logger(volume_installPath)
    cmd = 'rm -f %s' % volume_installPath
    vchost_user = os.environ['vchostUser']
    vchost_password = os.environ['vchostpwd']
    result = test_lib.lib_execute_ssh_cmd(vc_host, vchost_user,
                                          vchost_password, cmd, 180)

    test_util.test_dsc('Sync vcenter')
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    if db_volume:
        test_util.test_fail(
            "data volume was not removed from the database after synchronizing vcenter")

    #cleanup
    vm.destroy()
    vm.expunge()
    volume1.expunge()

    test_util.test_pass("Test sync volume in vcenter passed.")
def test():
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    test_lib.lib_skip_if_ps_num_is_not_eq_number(2)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    test_stub.ensure_host_has_no_vr(host_uuid)

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)

    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    for i in range(0, max_time):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            break
        time.sleep(1)

    if vm_stop_time is None:
        vm_stop_time = max_time

    for i in range(vm_stop_time, max_time):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not changed to Running as expected within %s s." %
            (max_time))

    vm.destroy()

    test_util.test_pass('Test VM ha change to running within %s s Success' %
                        (max_time))
def env_recover():
    test_util.test_logger("recover host: %s" % (mn_host[0].ip_))
    test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config,
                                   test_lib.scenario_file)
def test():
    # create_instanceoffering_min_cpu with tag_soft
    instance_offering_option = test_util.InstanceOfferingOption()
    instance_offering_option.set_cpuNum(1)
    instance_offering_option.set_memorySize(1 * 1024 * 1024 * 1024)
    instance_offering_option.set_allocatorStrategy(
        "MinimumCPUUsageHostAllocatorStrategy")
    instance_offering_option.set_type("UserVm")
    instance_offering_option.set_name('cpu')
    cpu_off = vm_ops.create_instance_offering(instance_offering_option)
    test_obj_dict.add_instance_offering(cpu_off)
    cpu_tag = tag_ops.create_system_tag(
        resourceType="InstanceOfferingVO",
        resourceUuid=cpu_off.uuid,
        tag="minimumCPUUsageHostAllocatorStrategyMode::soft")

    # create_instanceoffering_min_memory with tag_soft
    instance_offering_option.set_cpuNum(1)
    instance_offering_option.set_memorySize(1 * 1024 * 1024 * 1024)
    instance_offering_option.set_allocatorStrategy(
        "MinimumMemoryUsageHostAllocatorStrategy")
    instance_offering_option.set_type("UserVm")
    instance_offering_option.set_name('memory')
    memory_off = vm_ops.create_instance_offering(instance_offering_option)
    test_obj_dict.add_instance_offering(memory_off)
    memory_tag = tag_ops.create_system_tag(
        resourceType="InstanceOfferingVO",
        resourceUuid=memory_off.uuid,
        tag="minimumMemoryUsageHostAllocatorStrategyMode::soft")

    # kill prometheus
    cmd = "kill -9 `netstat -nlp | awk -F'[ /]*' '/9090/{print $(NF-2)}'`"
    mn_ip = os.environ["ZSTACK_BUILT_IN_HTTP_SERVER_IP"]
    test_lib.lib_execute_ssh_cmd(mn_ip, "root", "password", cmd)
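    # With prometheus down, host usage metrics are unavailable: soft allocator
    # mode should still place VMs, while hard mode (set below) should refuse.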

    condition = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    img_name = res_ops.query_resource(res_ops.IMAGE, condition)[0].name
    l3_name = res_ops.query_resource(res_ops.L3_NETWORK)[0].name
    try:
        vm = test_stub.create_vm_with_instance_offering(
            "cpu_1", img_name, l3_name, cpu_off)
        test_obj_dict.add_vm(vm)
    except Exception as e:
        test_util.test_fail(e)

    try:
        vm = test_stub.create_vm_with_instance_offering(
            "memory_1", img_name, l3_name, memory_off)
        test_obj_dict.add_vm(vm)
    except Exception as e:
        test_util.test_fail(e)

    tag_ops.update_system_tag(
        cpu_tag.uuid, tag="minimumCPUUsageHostAllocatorStrategyMode::hard")
    tag_ops.update_system_tag(
        memory_tag.uuid,
        tag="minimumMemoryUsageHostAllocatorStrategyMode::hard")

    try:
        vm = test_stub.create_vm_with_instance_offering(
            "cpu_2", img_name, l3_name, cpu_off)
        test_obj_dict.add_vm(vm)
        test_util.test_fail("hard model can not create vm")
    except Exception as e:
        test_util.test_logger(e)

    try:
        vm = test_stub.create_vm_with_instance_offering(
            "memory_2", img_name, l3_name, memory_off)
        test_obj_dict.add_vm(vm)
        test_util.test_fail("hard model can not create vm")
    except Exception as e:
        test_util.test_logger(e)

    test_lib.lib_execute_ssh_cmd(mn_ip,
                                 "root",
                                 "password",
                                 "zstack-ctl restart_node",
                                 timeout=300)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass(
        "test instanceoffering soft->hard and create vm case pass")
def test():
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm_status_running')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()

    test_stub.ensure_host_has_no_vr(host_uuid)

    vm_creation_option.set_name('multihost_basic_vm_status_stopped')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    vm2.stop()
    vm2.check()

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" %(host_ip))

    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)

    test_util.test_logger("wait for 180 seconds")
    time.sleep(180)

    test_stub.up_host_network(host_ip, test_lib.all_scenario_config)

    vm.set_state(vm_header.STOPPED)
    vm2.set_state(vm_header.STOPPED)
    vm.check()
    vm2.check()
    vm.destroy()
    vm2.destroy()


    test_util.test_pass('Test vm status checks after network disconnect and reconnect Success')
    def create_checker(self, test_obj):
        import zstackwoodpecker.zstack_test.zstack_test_snapshot as \
                zstack_sp_header

        if isinstance(test_obj, vm_header.TestVm):
            checker_chain = VmCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_vm().uuid

        elif isinstance(test_obj, volume_header.TestVolume):
            checker_chain = VolumeCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_volume().uuid

        elif isinstance(test_obj, image_header.TestImage):
            checker_chain = ImageCheckerFactory().create_checker(test_obj)
            #elif isinstance(test_obj, sg_header.TestSecurityGroup):
            #    checker_chain = SecurityGroupCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_image().uuid

        elif isinstance(test_obj, sg_header.TestSecurityGroupVm):
            checker_chain = SecurityGroupCheckerFactory().create_checker(
                test_obj)
            obj_uuid = 'security group vm'

        elif isinstance(test_obj, test_pf_header.TestPortForwarding):
            checker_chain = PortForwardingCheckerFactory().create_checker(
                test_obj)
            obj_uuid = test_obj.get_port_forwarding().uuid

        elif isinstance(test_obj, zstack_test_node.ZstackTestNode):
            checker_chain = NodeCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_test_node().uuid

        elif isinstance(test_obj, host_header.TestHost):
            checker_chain = HostCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_host().uuid

        elif isinstance(test_obj, eip_header.TestEIP):
            checker_chain = EipCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_eip().uuid

        elif isinstance(test_obj, vip_header.TestVip):
            checker_chain = VipCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_vip().uuid

        elif isinstance(test_obj, sp_header.TestSnapshot):
            checker_chain = SnapshotCheckerFactory().create_checker(test_obj)
            obj_uuid = test_obj.get_snapshot().uuid

        elif isinstance(test_obj, zstack_sp_header.ZstackVolumeSnapshot):
            checker_chain = SnapshotCheckerFactory().create_checker(test_obj)
            volume_obj = test_obj.get_target_volume().get_volume()
            if not volume_obj:
                #volume is deleted, but volume snapshot has been backed up.
                obj_uuid = None
            else:
                obj_uuid = volume_obj.uuid

        elif isinstance(test_obj, lb_header.TestLoadBalancer):
            checker_chain = LoadBalancerCheckerFactory().create_checker(
                test_obj)
            obj_uuid = test_obj.get_load_balancer().uuid

        test_util.test_logger('Add checker for [%s:] %s. Checkers are: %s' % \
                (test_obj.__class__.__name__, obj_uuid, checker_chain))
        return checker_chain
def test():
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])

    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)

    new_image.add_root_volume_template()

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    test_obj_dict.add_vm(vm)
    vm.check()
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())

    vm.destroy()
    vm.expunge()

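    # Create vm2 on a different connected host, so the first host's image
    # cache stays unused while the image itself remains in use elsewhere.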
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('uuid', '!=', host.uuid,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm2')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    host2 = test_lib.lib_find_host_by_vm(vm2.get_vm())
    test_obj_dict.add_vm(vm2)

    if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip(
            'Ceph does not use the image cache directly, skip test.')

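    # The template cache lives under <mountPath>/imagecache/template/<image uuid>;
    # an ImageStore BS keeps an additional copy under <mountPath>/zstore-cache.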
    image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                      new_image.image.uuid)
    if not test_lib.lib_check_file_exist(host, image_cache_path):
        test_util.test_fail('image cache is expected to exist')
    if bss[0].type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
        image_cache_path = "%s/zstore-cache/%s" % (ps.mountPath,
                                                   new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to exist')

    new_image.delete()
    new_image.expunge()

    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        count = 0
        while True:
            image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (
                ps.mountPath, new_image.image.uuid, new_image.image.uuid)
            if not test_lib.lib_check_file_exist(host, image_cache_path):
                break
            elif count > 5:
                test_util.test_fail('image cache is expected to be deleted')
            test_util.test_logger('check %s times: image cache still exists' %
                                  count)
            time.sleep(5)
            count += 1

    vm2.destroy()
    vm2.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)

    count = 0
    while True:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                          new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exists' %
                              count)
        time.sleep(5)
        count += 1

    count = 0
    while True:
        image_cache_path = "%s/zstore-cache/%s" % (ps.mountPath,
                                                   new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exists' %
                              count)
        time.sleep(5)
        count += 1

    test_util.test_pass('imagecache cleanup Pass.')
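# The three polling loops above repeat one wait-for-deletion pattern. A hedged
# refactoring sketch (helper name and defaults are illustrative, not an
# existing test_lib API):
def _wait_until_file_deleted(host, path, attempts=6, interval=5):
    for count in range(attempts):
        # lib_check_file_exist returns True while the file is still present
        if not test_lib.lib_check_file_exist(host, path):
            return
        test_util.test_logger('check %s times: image cache still exists' % count)
        time.sleep(interval)
    test_util.test_fail('image cache is expected to be deleted: %s' % path)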
def test():
    global vm
    global host_uuid
    global host_ip
    global max_time
    global storagechecker_timeout

    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    test_stub.ensure_host_has_no_vr(host_uuid)

    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)

    #We originally waited 300 seconds here for all vms to be killed, but test
    #results show there is no need to wait: the vm is killed very quickly.
    #test_util.test_logger("wait for 300 seconds")
    #time.sleep(300)
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    for i in range(0, max_time):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            break
        time.sleep(1)

    if vm_stop_time is None:
        vm_stop_time = max_time
        
    for i in range(vm_stop_time, max_time):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within %s s." %(max_time))

    vm.destroy()

    test_util.test_pass('Test VM ha change to running within 120s Success')
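# The two range() loops above implement "wait until the VM reaches a given
# state within a shared time budget". A hedged helper sketch (name is
# illustrative, not an existing API): returns elapsed seconds, or None on timeout.
def _wait_for_vm_state(vm_uuid, state, timeout):
    cond = res_ops.gen_query_conditions('uuid', '=', vm_uuid)
    for elapsed in range(timeout):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == state:
            return elapsed
        time.sleep(1)
    return None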
def test():
    global test_obj_dict, bs, ps
    #skip the test unless at least one backup storage is ImageStore
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')

    #skip the test if any primary storage is SharedBlock or AliyunNAS
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')

    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume_uuid = volume.volume.uuid
    vol_size = volume.volume.size
    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    #vm.check()
    test_obj_dict.add_vm(vm)
    volume.attach(vm)

    new_vm = vm.clone(['test_vm_clone_with_one_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)

    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. One data volume was attached before cloning, so 2 volumes (root + data) are expected, but we only caught %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % new_vm.vm.uuid)

    #set_size = 1024*1024*1024*5
    #vol_ops.resize_data_volume(volume_uuid, set_size)
    #vm.update()
    #vol_size_after = test_lib.lib_get_data_volumes(vm.get_vm())[0].size
    #if set_size != vol_size_after:
    #    test_util.test_fail('Resize Data Volume failed, size = %s' % vol_size_after)

    #volume.detach()
    #vm.update()
    #target_host = test_lib.lib_find_random_host(vm.get_vm())
    #vol_ops.migrate_volume(volume_uuid, target_host.uuid)

    #cond = res_ops.gen_query_conditions('uuid', '=', volume_uuid)
    #data_volume = res_ops.query_resource(res_ops.VOLUME, cond)
    #vol_size_after = data_volume[0].size
    #if set_size != vol_size_after:
    #    test_util.test_fail('Resize Data Volume failed, size = %s' % vol_size_after)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def _delete_files(host_ip, path):
    cmd = host_plugin.HostShellCmd()
    cmd.command = "rm -rf %s*" % path 
    test_util.test_logger("Delete files: %s in Host: %s" % (path, host_ip))
    http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
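# Hedged usage example: path acts as a prefix because the command appends "*";
# the host IP and prefix below are illustrative only.
#_delete_files('10.0.0.8', '/tmp/zstack/checking_point/')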