Example #1
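These examples are ZStack woodpecker management-node (MN) HA test cases and rely on framework helpers that are not shown on this page. A minimal sketch of the imports such a test module typically needs is listed below; the exact module paths are assumptions based on common zstackwoodpecker conventions and may differ between test suites.

import os
import time
import random

# NOTE: the module paths below are assumptions, not part of the original snippet.
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.operations.ha_operations as ha_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.header.vm as vm_header
import test_stub  # suite-local helpers: get_host_by_mn_vm, stop_host, recover_host, ...

# Shut down the host that runs the MN VM, wait for the management server to
# come back, exercise VM create/check/destroy, then recover the stopped host.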
def test():
    global vm
    global mn_host
    mn_ip = os.environ.get('zstackHaVip')
    mn_host = test_stub.get_host_by_mn_vm(mn_ip, test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    test_util.test_logger("shutdown host [%s] that mn vm is running on" %
                          (mn_host[0].ip_))
    test_stub.stop_host(mn_host[0], test_lib.all_scenario_config)
    test_util.test_logger(
        "wait for 2 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start()
    except:
        test_util.test_fail(
            "management node does not recover after its former host down")

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_logger("recover host: %s" % (mn_host[0].ip_))
    test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                           test_lib.deploy_config)

    test_util.test_pass('Create VM Test Success')
Example #2
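# Environment recovery: bring back any MN hosts that were left down during the
# test, then wait until MN HA reports ready.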
def env_recover():
    global need_recover_mn_host_list
    if need_recover_mn_host_list:
        for index in need_recover_mn_host_list:
            test_util.test_logger("recover host: %s" % (mn_host_list[index].ip_))
            test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
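# Variant that recovers two specific scenario hosts (s_vm0 and s_vm1), waits
# for MN HA readiness, then checks the zsha2 version on the VIP node.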
def env_recover():
    global s_vm0
    global s_vm1
    test_util.test_logger("recover host: %s and %s" % (s_vm0.ip_, s_vm1.ip_))
    test_stub.recover_host(s_vm0, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.recover_host(s_vm1, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
    test_stub.exec_zsha2_version(vip_s_vm_cfg_lst[0].ip_, "root", "password")
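# Variant that additionally restores ZSTACK_BUILT_IN_HTTP_SERVER_IP to the
# public MN IP after recovering the hosts and waiting for MN HA readiness.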
def env_recover():
    global pub_mn_ip
    global mag_mn_ip
    if need_recover_mn_host_list:
        for index in need_recover_mn_host_list:
            test_util.test_logger("recover host: %s" % (mn_host_list[index].ip_))
            test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = pub_mn_ip
Example #7
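# Error cleanup: destroy the test VM on a best-effort basis, then recover the
# host that was shut down during the test.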
def error_cleanup():
    global vm
    if vm:
        try:
            vm.destroy()
        except:
            pass
    test_util.test_logger("recover host: %s" % (mn_host[0].ip_))
    test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                           test_lib.deploy_config)
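# Ten rounds: cold-stop the host running the MN VM, verify the MN VM fails
# over to another host and the management server restarts, re-check host and
# storage connectivity, create/check/destroy a VM (skipped on the
# vpc-ceph-3-sites scenario), then recover the stopped host.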
def test():
    global vm
    global mn_host
    for i in range(0, 10):
        test_util.test_logger("force shutdown host round %s" % (i))

        mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(mn_host) != 1:
            test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
        test_util.test_logger("force shutdown host [%s] that mn vm is running on" % (mn_host[0].ip_))
        test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
        test_util.test_logger("wait for 40 seconds to see if management node VM starts on another host")
        time.sleep(20)
    
        new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
        if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
            test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
    
        count = 60
        while count > 0:
            new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
            if len(new_mn_host) == 1:
                test_util.test_logger("management node VM run after its former host down for 30s")
                break
            elif len(new_mn_host) > 1:
                test_util.test_fail("management node VM runs on more than one host after its former host down")
            time.sleep(5)
            count -= 1
    
        if len(new_mn_host) == 0:
            test_util.test_fail("management node VM does not run after its former host down for 30s")
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
    
        #node_ops.wait_for_management_server_start(300)
        test_stub.wrapper_of_wait_for_management_server_start(600)
    
        test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
        test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file, test_lib.all_scenario_config, test_lib.deploy_config)
        test_stub.ensure_bss_connected()
        test_stub.ensure_pss_connected()

        if os.path.basename(os.environ.get('WOODPECKER_SCENARIO_CONFIG_FILE')).strip() == "scenario-config-vpc-ceph-3-sites.xml":
            pass
        else:
            vm = test_stub.create_basic_vm()
            vm.check()
            vm.destroy()
        test_stub.recover_host(mn_host[0], test_lib.all_scenario_config, test_lib.deploy_config)
        test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)

    test_util.test_pass('Create VM Test Success')
Example #9
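# Environment recovery: bring back the host that was stopped during the test.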
def env_recover():
    test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                           test_lib.deploy_config)
Example #10
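# Create a NeverStop HA VM (skipped unless the VR is VyOS), shut down every MN
# host, recover about half of them, verify the MN VM and management server come
# back, then confirm the HA VM returns to Running.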
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    test_stub.skip_if_vr_not_vyos("vr")
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    vm = test_stub.create_basic_vm()
    vm.check()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    vm.set_state(vm_header.RUNNING)
    vm.check()

    for host in mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail(
            "management node does not recover after MN VM is running")

    test_util.test_logger("Delay 60s and then check if the vm is running")
    time.sleep(180)
    if test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 300):
        vm.update()
        vm.check()
        vm.destroy()
    else:
        test_util.test_fail(
            "ha vm has not changed to running after 2 hosts recover with 300s")
    test_util.test_pass('Check Never Stop VM Test Success')
Example #11
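# Cold-stop every MN host while an HA VM exists, recover about half of them,
# verify MN VM failover and management-server start, restart the HA VM, then
# retry creating a basic VM for up to 30 seconds.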
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    ha_vm = test_stub.create_ha_vm()
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("force stop host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config, 'cold')

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.return_pass_ahead_if_3sites("TEST PASS")

    ha_vm.set_state(vm_header.RUNNING)
    ha_vm.check()

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
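# Multiple-networks scenario: take down the management network on every MN host
# (the MN VM is expected to keep running), recover about half the hosts, wait
# for the management server, recover the rest, re-check connectivity, then
# create a VM.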
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config, downMagt=True)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail('MN VM has been murdered!!! Expected result: the MN VM should not be impacted when the management network is down.')

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 120s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    for index in need_recover_mn_host_list:
        test_util.test_logger("recover host: %s" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=True)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
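# Single-network scenario: take down the host networks so the MN VM stops,
# recover about half the hosts, verify MN VM failover and management-server
# start, re-check connectivity, then create a VM.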
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    test_stub.skip_if_scenario_is_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index],
                               test_lib.all_scenario_config,
                               test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected(
        exclude_host=[mn_host_list[need_recover_mn_host_list[0]]])
    test_stub.ensure_bss_host_connected_from_sep_net_down(
        test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")

    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
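# Skipped case ("2 hosts down at the same time is not supported"): would take
# down the network on half the MN hosts, recover one of them, and verify MN VM
# failover before creating a VM.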
def test():
    global vm
    global mn_host_list
    global test_mn_host_list

    test_util.test_skip("2 hosts down at the same time is not support")
    test_stub.skip_if_scenario_is_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for index in test_mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (mn_host_list[index].ip_))
        test_stub.shutdown_host_network(mn_host_list[index], test_lib.all_scenario_config)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))

    test_util.test_logger("recover host [%s]" % (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]], test_lib.all_scenario_config, test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    test_util.test_logger("wait for 5 minutes to see if management node starts again")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)


    test_stub.ensure_hosts_connected()
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
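# Environment recovery: bring back test_host and wait for MN HA readiness.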
def env_recover():
    test_util.test_logger("recover host: %s" % (test_host.ip_))
    test_stub.recover_host(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
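# Multiple-networks scenario: point ZSTACK_BUILT_IN_HTTP_SERVER_IP at the
# management-network MN IP, take down the separated (non-management) network on
# every MN host (the MN VM is expected to keep running), recover about half the
# hosts, verify the management server and storage connectivity, then create a VM.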
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()
    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config, downMagt=False)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail('MN VM has been murdered; expected result: the MN VM should not be impacted when the separated network is down.')

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 120s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example #17
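# Management-network-down scenario (downMagt=True): the MN VM is expected to
# keep running; recover about half the hosts, wait for the management server
# via node_ops, recover the rest, re-check connectivity, then create a VM.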
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list

    test_stub.skip_if_scenario_not_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config, downMagt=True)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) == 0:
        test_util.test_fail('MN VM has been murdered!!! Expected result: the MN VM should not be impacted when the management network is down.')

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 120s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail("management node does not recover after MN VM is running")

    for index in need_recover_mn_host_list:
        test_util.test_logger("recover host: %s" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=True)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example #18
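# Environment recovery: bring back the host that was stopped during the test.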
def env_recover():
    test_util.test_logger("recover host: %s" % (mn_host[0].ip_))
    test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                           test_lib.deploy_config)
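# Ten rounds of cold-stopping the MN VM's host with failover verification,
# similar to the earlier round-robin test, except that a VM is created and
# destroyed in every round.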
def test():
    global vm
    global mn_host
    for i in range(0, 10):
        test_util.test_logger("force shutdown host round %s" % (i))

        mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                              test_lib.scenario_file)
        if len(mn_host) != 1:
            test_util.test_fail('MN VM is running on %d host(s)' %
                                len(mn_host))
        test_util.test_logger(
            "force shutdown host [%s] that mn vm is running on" %
            (mn_host[0].ip_))
        test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
        test_util.test_logger(
            "wait for 40 seconds to see if management node VM starts on another host"
        )
        time.sleep(20)

        new_mn_host_ip = test_stub.get_host_by_consul_leader(
            test_lib.all_scenario_config, test_lib.scenario_file)
        if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
            test_util.test_fail(
                "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
                % (new_mn_host_ip, mn_host[0].ip_))

        count = 60
        while count > 0:
            new_mn_host = test_stub.get_host_by_mn_vm(
                test_lib.all_scenario_config, test_lib.scenario_file)
            if len(new_mn_host) == 1:
                test_util.test_logger(
                    "management node VM run after its former host down for 30s"
                )
                break
            elif len(new_mn_host) > 1:
                test_util.test_fail(
                    "management node VM runs on more than one host after its former host down"
                )
            time.sleep(5)
            count -= 1

        if len(new_mn_host) == 0:
            test_util.test_fail(
                "management node VM does not run after its former host down for 30s"
            )
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )

        try:
            node_ops.wait_for_management_server_start(300)
        except:
            test_util.test_fail(
                "management node does not recover after its former host down")

        test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
        test_stub.ensure_bss_host_connected_from_stop(
            test_lib.scenario_file, test_lib.all_scenario_config,
            test_lib.deploy_config)
        test_stub.ensure_bss_connected()
        test_stub.ensure_pss_connected()

        vm = test_stub.create_basic_vm()
        vm.check()
        vm.destroy()
        test_stub.recover_host(mn_host[0], test_lib.all_scenario_config,
                               test_lib.deploy_config)
        test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config,
                                       test_lib.scenario_file)

    test_util.test_pass('Create VM Test Success')
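# Skipped case ("2 hosts down at the same time is not supported"): would
# cold-stop half the MN hosts, recover one of them, verify MN VM failover,
# management-server start, and connectivity, then create a VM.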
def test():
    global vm
    global mn_host_list
    global test_mn_host_list

    test_util.test_skip("2 hosts down at the same time is not support")
    test_stub.skip_if_scenario_is_multiple_networks()

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    for index in test_mn_host_list:
        test_util.test_logger("force stop host [%s]" %
                              (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config,
                            'cold')

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file,
                                                  test_lib.all_scenario_config,
                                                  test_lib.deploy_config)
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
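# Separated-network-down variant that expects the MN VM to stop: point
# ZSTACK_BUILT_IN_HTTP_SERVER_IP at the management-network MN IP, take down the
# non-management network on every MN host, recover about half of them, and
# verify MN VM failover before creating a VM.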
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    global pub_mn_ip
    global mag_mn_ip

    test_stub.skip_if_scenario_not_multiple_networks()

    pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip

    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config, downMagt=False)

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 120
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 120s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 120s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)

    test_stub.ensure_hosts_connected(exclude_host=[mn_host_list[need_recover_mn_host_list[0]]])
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    vm = test_stub.create_basic_vm()

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
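# Warm-stop half of the MN hosts (chosen at random), recover the last one
# stopped, verify MN VM failover and management-server start, then retry VM
# creation for up to 30 seconds.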
def test():
    global vm
    global mn_host_list
    global test_mn_host_list
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)

    for index in test_mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))

    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()

    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                                  test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )

    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail(
            "management node does not recover after recover one mn host")

    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')

    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')
Example #23
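# Cold-stop every MN host, recover about half of them, verify MN VM failover,
# management-server start, and host/storage connectivity, then create a VM.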
def test():
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)

    for host in mn_host_list:
        test_util.test_logger("force stop host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config, 'cold')

    need_recover_mn_host_list = range(mn_host_num)

    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))

    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)

    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)

    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))

    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1

    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")

    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail("management node does not recover after MN VM is running")

    test_stub.ensure_hosts_connected(exclude_host=[mn_host_list[need_recover_mn_host_list[0]]])
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()

    test_util.test_pass('Create VM Test Success')