Example 1
def test():
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("close MN-VM and all zs-ha services in cluster")
    test_stub.stop_zsha(mn_host[0], test_lib.all_scenario_config)
    new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                              test_lib.scenario_file)
    if len(new_mn_host) != 0:
        test_util.test_fail('MN VM is still running after the ha cluster was shut down')

    test_util.test_logger("start ha cluster")
    test_stub.start_zsha(mn_host[0], test_lib.all_scenario_config)
    test_util.test_logger("wait 50s for MN VM to start")
    time.sleep(50)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail(
            'MN VM is running on %d host(s) after the ha cluster was restarted'
            % len(mn_host))
    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node does not recover after the ha cluster was restarted"
        )
    test_util.test_pass('Shutdown HA Cluster Test Success')
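Note: this example waits a fixed 50s before re-querying the MN VM, and the examples below hand-roll similar count/sleep loops. A bounded poll is usually more robust against slow failovers. A minimal sketch, assuming the same module-level imports (time, test_stub, test_lib, test_util) these examples already use; the helper name wait_for_mn_vm_on_single_host is hypothetical, not part of test_stub:

def wait_for_mn_vm_on_single_host(timeout=300, interval=5):
    # Poll until exactly one host reports the MN VM, or fail after timeout seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        hosts = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                            test_lib.scenario_file)
        if len(hosts) == 1:
            return hosts
        if len(hosts) > 1:
            test_util.test_fail('MN VM is running on %d host(s)' % len(hosts))
        time.sleep(interval)
    test_util.test_fail('MN VM is not running on exactly one host within %ds' % timeout)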
def test():
    global vm
    global mn_host
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))

    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip
   
    test_util.test_logger("shutdown host's network [%s] that mn vm is running on" % (mn_host[0].ip_))
    test_stub.shutdown_host_network(mn_host[0], test_lib.all_scenario_config, downMagt=False)
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_ or new_mn_host_ip == mn_host[0].managementIp_:
        test_util.test_fail("management node VM is not running correctly on [%s] after the network of its former host [%s] went down for 20s" % (new_mn_host_ip, mn_host[0].ip_))

    # poll for up to 600s (120 rounds of 5s) until the MN VM is reported on exactly one host
    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM is running again after its former host's network went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host's network went down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM did not start within 600s of its former host's network going down")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host's network went down")

    try:
        node_ops.wait_for_management_server_start()
    except Exception:
        test_util.test_fail("management node does not recover after its former host's network went down")

    test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
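The example above redirects ZSTACK_BUILT_IN_HTTP_SERVER_IP to the management-network IP and parks the original in a global (pub_mn_ip) so a separate teardown can restore it. When no shared teardown is available, a try/finally keeps the environment consistent even if the test fails midway. A sketch under that assumption; with_mgmt_network_ip and run_body are hypothetical names:

def with_mgmt_network_ip(run_body):
    # Temporarily point the built-in HTTP server IP at the MN's management IP.
    saved_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = \
        res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    try:
        run_body()
    finally:
        os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = saved_ip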
Example 3
def test():
    global vm
    global mn_host

    test_stub.skip_if_scenario_is_multiple_networks()

    for i in range(10):
        test_util.test_logger("shut down host network, round %s" % (i))
        mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(mn_host) != 1:
            test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
        test_util.test_logger("shutdown host's network [%s] that mn vm is running on" % (mn_host[0].ip_))
        test_stub.shutdown_host_network(mn_host[0], test_lib.all_scenario_config)
        test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
        time.sleep(20)
    
        new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
        if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
            test_util.test_fail("management node VM is not running correctly on [%s] after the network of its former host [%s] went down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
        else:
            test_util.test_logger("management node VM is running on [%s] after the network of its former host [%s] went down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
    
        # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
        count = 60
        while count > 0:
            new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
            if len(new_mn_host) == 1:
                test_util.test_logger("management node VM is running again after its former host's network went down")
                break
            elif len(new_mn_host) > 1:
                test_util.test_fail("management node VM runs on more than one host after its former host's network went down")
            time.sleep(5)
            count -= 1

        if len(new_mn_host) == 0:
            test_util.test_fail("management node VM did not start within 300s of its former host's network going down")
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host's network went down")
    
        try:
            node_ops.wait_for_management_server_start()
        except Exception:
            test_util.test_fail("management node does not recover after its former host's network went down")
    
        test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
        test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file, test_lib.all_scenario_config, test_lib.deploy_config)
        test_stub.ensure_pss_connected()
        test_stub.ensure_bss_connected()

        vm = test_stub.create_basic_vm()
        vm.check()
        vm.destroy()

        test_stub.reopen_host_network(mn_host[0], test_lib.all_scenario_config)
        test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)

    test_util.test_pass('Create VM Test Success')
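Each round above must reopen the host network before the next iteration; if a check raises mid-round, the network stays down when the suite's recover stage runs. A per-round try/finally guards against that. A sketch, assuming the recover stage tolerates an extra reopen; run_round_with_network_guard and run_checks are hypothetical names:

def run_round_with_network_guard(host, run_checks):
    # Shut the host network, run this round's checks, and always reopen the
    # network and wait for MN HA to settle, even if a check raises.
    test_stub.shutdown_host_network(host, test_lib.all_scenario_config)
    try:
        run_checks()
    finally:
        test_stub.reopen_host_network(host, test_lib.all_scenario_config)
        test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config,
                                       test_lib.scenario_file)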
Example 4
def test():
    global vm
    global mn_host
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger(
        "stop consul on host [%s] that the mn vm is running on" %
        (mn_host[0].ip_))
    test_stub.stop_consul(mn_host[0], test_lib.all_scenario_config)
    test_util.test_logger(
        "wait for 50 seconds to see if management node VM starts on another host"
    )
    time.sleep(50)
    try:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
    except Exception:
        test_util.test_fail(
            "fail to query the host of the management node VM after its former host's consul went down"
        )
    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not start after its former host's consul went down"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM starts on more than one host after its former host's consul went down"
        )
    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node does not recover after its former host's consul went down"
        )

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except Exception:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
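The 30s create-VM retry loop above reappears in several of these examples. A generic bounded-retry helper keeps the timeout bookkeeping in one place. A minimal sketch; retry_until is a hypothetical name, not part of test_stub:

def retry_until(action, timeout=30, interval=1):
    # Call action() until it stops raising, or fail the test after timeout seconds.
    remaining = timeout
    while remaining > 0:
        try:
            return action()
        except Exception:
            time.sleep(interval)
            remaining -= interval
    test_util.test_fail('action did not succeed within %ds' % timeout)

With it, the loop above collapses to vm = retry_until(test_stub.create_basic_vm).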
def test():
    global vm
    global ori_ip
    global mn_vm_host
    mn_vm_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                             test_lib.scenario_file)
    if len(mn_vm_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_vm_host))

    test_util.test_logger("change mn vm ip in config file of host [%s]" %
                          (mn_vm_host[0].ip_))
    new_mn_vm_ip = "172.20.199.99"
    ori_ip = os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP')
    test_stub.update_mn_vm_config(mn_vm_host[0], 'Ipaddr', new_mn_vm_ip,
                                  test_lib.all_scenario_config)
    test_stub.destroy_mn_vm(mn_vm_host[0], test_lib.all_scenario_config)
    test_util.test_logger(
        "wait for 40 seconds to see if management node VM starts on another host"
    )
    time.sleep(40)
    try:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
    except Exception:
        test_util.test_fail(
            "fail to query the host of the management node VM after its ip was changed and it was destroyed"
        )
    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not start after its ip was changed and it was destroyed"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM starts on more than one host after its ip was changed and it was destroyed"
        )
    if new_mn_host[0].ip_ != mn_vm_host[0].ip_:
        test_util.test_fail(
            'management node VM starts on another host after its ip was changed and it was destroyed'
        )

    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = new_mn_vm_ip
    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail("management node does not recover after the mn vm was destroyed")

    test_stub.ensure_hosts_connected()
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("force shutdown host [%s] that mn vm is running on" % (mn_host[0].ip_))
    test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
        test_util.test_fail("management node VM is not running correctly on [%s] after its former host [%s] went down for 20s" % (new_mn_host_ip, mn_host[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM is running again after its former host went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host went down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM did not start within 300s of its former host going down")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host went down")

    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail("management node does not recover after its former host went down")

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except Exception:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example 7
def test():
    if test_lib.scenario_config is None or test_lib.scenario_file is None:
        test_util.test_fail('Suite Setup Fail without scenario')

    if not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_destroy is not None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    #This vlan creation is not a must if testing is under a nested virt env, but it is required on a physical host without enough physical network devices when your test execution machine is not the same as the Host machine.
    #No matter whether the current host is a ZStack test host, we need to create 2 vlan devices for future testing connections for the novlan test cases.
    linux.create_vlan_eth("eth0", 10)
    linux.create_vlan_eth("eth0", 11)

    #If the test execution machine is not the same as the Host machine, the deploy work needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent), and it can not directly call SetupAction.run()
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = 'eth0'
    cmd.vlan = 10
    
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = 'eth0'
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    config_json = os.environ.get('configJson')
    ha_deploy_tool = os.environ.get('zstackHaInstaller')
    mn_img = os.environ.get('mnImage')
    test_stub.deploy_ha_env(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config, config_json, ha_deploy_tool, mn_img)

    node_operations.wait_for_management_server_start(300)
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", os.environ.get('zstackHaVip'), 'root', 'password')
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    for host in testHosts:
        if os.path.exists(EXTRA_HOST_SETUP_SCRIPT):
            os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    if test_lib.lib_get_ha_selffencer_maxattempts() is not None:
        test_lib.lib_set_ha_selffencer_maxattempts('60')
        test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.lib_set_reserved_memory('8G')
    test_util.test_pass('Suite Setup Success')
def test():
    global vm
    global mn_host
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("destroy mn vm on host [%s]" % mn_host[0].ip_)
    test_stub.destroy_mn_vm(mn_host[0], test_lib.all_scenario_config)
    new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(new_mn_host) == 0:
        test_util.test_logger("mn vm was destroyed successfully")
    else:
        test_util.test_fail("mn vm was not destroyed successfully")
        
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on one host")
    time.sleep(20)
    try:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    except Exception:
        test_util.test_fail("fail to query the host of the management node VM after it was destroyed")
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not start after being destroyed")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM starts on more than one host after being destroyed")
    if new_mn_host[0].ip_ != mn_host[0].ip_:
        test_util.test_fail("management node VM was started on another host after being destroyed")

    test_util.test_logger("wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail("management node does not recover after the mn vm was destroyed")

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except Exception:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example 9
def wrapper_of_wait_for_management_server_start(wait_start_timeout, EXTRA_SUITE_SETUP_SCRIPT=None):
    import zstackwoodpecker.operations.node_operations as node_operations
    if os.path.basename(os.environ.get('WOODPECKER_SCENARIO_CONFIG_FILE')).strip() == "scenario-config-mnha2.xml":
        test_util.test_logger("@@@DEBUG->IS MNHA2@@@")
        # read the old HA VIP before auto_set_mn_ip() rewrites it; guard against
        # it being unset so the sed branch can not hit an unbound old_mn_ip
        old_mn_ip = os.environ.get('zstackHaVip')
        auto_set_mn_ip(test_lib.scenario_file)
        if old_mn_ip and EXTRA_SUITE_SETUP_SCRIPT:
            cmd = 'sed -i "s/%s/%s/g" %s' % (old_mn_ip, os.environ['zstackHaVip'], EXTRA_SUITE_SETUP_SCRIPT)
            test_util.test_logger("@@@DEBUG-> run cmd: %s @@@ " % (cmd))
            os.system(cmd)
    try:
        node_operations.wait_for_management_server_start(wait_start_timeout)
    except Exception:
        restart_mn_node_with_long_timeout()
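A typical call site for this wrapper, mirroring the plain node_ops.wait_for_management_server_start(300) calls elsewhere in these examples; the script path is a placeholder for whatever suite-setup script the sed rewrite should target, and may be omitted:

# wait up to 300s; on an mnha2 scenario, rewrite the old HA VIP in the setup script first
wrapper_of_wait_for_management_server_start(300, EXTRA_SUITE_SETUP_SCRIPT='/path/to/suite_setup.sh')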
def test():
    global vm
    global mn_host
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("stop consul for host [%s] that mn vm is running on" % (mn_host[0].ip_))
    test_stub.stop_consul(mn_host[0], test_lib.all_scenario_config)
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
        test_util.test_fail("management node VM is not running correctly on [%s] after consul on its former host [%s] went down for 20s" % (new_mn_host_ip, mn_host[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM is running again after its former host's consul went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host's consul went down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM did not start within 300s of its former host's consul going down")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host's consul went down")

    test_util.test_logger("wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail("management node does not recover after its former host's consul went down")

    test_stub.ensure_hosts_connected()
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) // 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config)

    need_recover_mn_host_list = list(range(mn_host_num))

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after the hosts were recovered")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after the hosts were recovered"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 300s of the hosts being recovered"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after the hosts were recovered"
        )

    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node service does not start although the MN VM is running")

    test_stub.ensure_hosts_connected(
        exclude_host=[mn_host_list[need_recover_mn_host_list[0]]])
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file,
                                                  test_lib.all_scenario_config,
                                                  test_lib.deploy_config)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example 12
def test():
    global vm
    global ha_vm
    global mn_host
    ha_vm = test_stub.create_ha_vm()
    ha_vm.check()
    ha_vm_host = test_lib.lib_get_vm_host(ha_vm.vm)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    if ha_vm_host.managementIp != mn_host[0].ip_:
        conditions = res_ops.gen_query_conditions('managementIp', '=',
                                                  mn_host[0].ip_)
        host = res_ops.query_resource(res_ops.HOST, conditions)
        # migrate the HA VM onto the MN VM's host; the host inventory object
        # returned by lib_get_vm_host has no migrate() of its own
        ha_vm.migrate(host[0].uuid)

    test_util.test_logger("force shutdown host [%s] that mn vm is running on" %
                          (mn_host[0].ip_))
    test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on another host"
    )
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
        test_util.test_fail(
            "management node VM is not running correctly on [%s] after its former host [%s] went down for 20s"
            % (new_mn_host_ip, mn_host[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after its former host went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host went down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 300s of its former host going down"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host went down"
        )

    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node does not recover after its former host went down")

    conditions = res_ops.gen_query_conditions('managementIp', '=',
                                              mn_host[0].ip_)
    host = res_ops.query_resource(res_ops.HOST, conditions)
    if host[0].status != "Connected":
        test_util.test_fail("Target host:%s is not connected as expected." %
                            (host[0].uuid))

    ha_vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example 13
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    test_stub.skip_if_vr_not_vyos("vr")
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) // 2)

    vm = test_stub.create_basic_vm()
    vm.check()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    vm.set_state(vm_header.RUNNING)
    vm.check()

    for host in mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config)

    need_recover_mn_host_list = list(range(mn_host_num))

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after the hosts were recovered")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after the hosts were recovered"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 300s of the hosts being recovered"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after the hosts were recovered"
        )

    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node service does not start although the MN VM is running")

    test_util.test_logger("Delay 60s and then check if the vm is running")
    time.sleep(180)
    if test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 300):
        vm.update()
        vm.check()
        vm.destroy()
    else:
        test_util.test_fail(
            "ha vm has not changed to running within 300s after 2 hosts were recovered")
    test_util.test_pass('Check Never Stop VM Test Success')
def test():
    global vm
    global mn_host

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger(
        "shut down the network of host [%s] that the mn vm is running on" %
        (mn_host[0].ip_))
    test_stub.shutdown_host_network(mn_host[0],
                                    test_lib.all_scenario_config,
                                    downMagt=True)
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on another host"
    )
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or (new_mn_host_ip != mn_host[0].ip_ and
                                new_mn_host_ip != mn_host[0].managementIp_):
        test_util.test_fail(
            "management network is down, the mn host should not change. Expected on [%s] but it is on [%s]"
            % (mn_host[0].ip_, new_mn_host_ip))

    # poll for up to 600s (120 rounds of 5s) until the MN VM is reported on exactly one host
    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after its former host's network went down")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host's network went down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 600s of its former host's network going down"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host's network went down"
        )

    try:
        node_ops.wait_for_management_server_start()
    except Exception:
        test_util.test_fail(
            "management node does not recover after its former host's network went down"
        )

    test_stub.reopen_host_network(mn_host[0], test_lib.all_scenario_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config,
                                   test_lib.scenario_file)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=True)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()
    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) // 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host,
                                        test_lib.all_scenario_config,
                                        downMagt=False)

    need_recover_mn_host_list = list(range(mn_host_num))

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail(
            'MN VM was killed; it should not be impacted when only the separated network is down.'
        )

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # poll for up to 600s (120 rounds of 5s) until the MN VM is reported on exactly one host
    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after the hosts were recovered")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after the hosts were recovered"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 600s of the hosts being recovered"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after the hosts were recovered"
        )

    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node service does not start although the MN VM is running")

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global test_mn_host_list
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) // 2)

    for index in test_mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after one host was recovered")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after one host was recovered"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 300s of one host being recovered"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after one host was recovered"
        )

    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node does not recover after recovering one mn host")

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except Exception:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
def test():
    global vm
    global mn_host_list
    global test_mn_host_list

    test_util.test_skip("2 hosts down at the same time is not support")
    test_stub.skip_if_scenario_is_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) // 2)

    for index in test_mn_host_list:
        test_util.test_logger("force stop host [%s]" %
                              (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config,
                            'cold')

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    # poll for up to 300s (60 rounds of 5s) until the MN VM is reported on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM is running again after one host was recovered")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after one host was recovered"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM did not start within 300s of one host being recovered"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after one host was recovered"
        )

    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except Exception:
        test_util.test_fail(
            "management node does not recover after recovering one mn host")

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file,
                                                  test_lib.all_scenario_config,
                                                  test_lib.deploy_config)
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')