def test_sanityrefresh_lockunlockwithvms(conn):
    """ This test performs a lock/unlock of a host with VMs.

        Inputs:
        * conn (string) - ID of pexpect session
        Outputs:
        * testFailed_flag - True if the test fails, False otherwise
    """

    vmlist_virtio = []
    vmlist_avp = []
    vmlist_vswitch = []
    testFailed_flag = False

    test_name = "test_sanityrefresh_lockunlockwithvms"

    # Record the start time so the total duration can be reported at the end
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    logging.info("Test Step 1: source nova openrc")
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    logging.info("Test Step 2: get the IDs associated with tenant users")
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the list of VMs on the system
    logging.info("Test Step 3: Check if there are already VMs on the system")
    vm_list = nova.get_novavms(conn, "name")

    # If there are no VMs on the system, launch some so the test can proceed
    if not vm_list:
        logging.warning("There are no VMs present on the system so the test "
                        "will launch some to proceed.")
        vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
        vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 2)
        vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
        expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch
        vm_list = nova.get_novavms(conn, "name")
        if set(vm_list) != set(expectedvm_list):
            logging.error(
                "Expected the following VMs: %s, instead we have the following: %s"
                % (expectedvm_list, vm_list))
            logging.info("This means that not all expected VMs were launched.")
            testFailed_flag = True

    # Total vms
    total_vms = len(vm_list)

    # Check if we're small footprint
    smallfootprint = sysinv.check_smallfootprint(conn)
    if smallfootprint:
        logging.info("This system is small footprint enabled.")

    # Determine which hypervisors we have in the system
    hypervisor_list = nova.get_novahypervisors(conn)
    logging.info("The system has the following hypervisors: %s" %
                 hypervisor_list)

    # For each hypervisor, determine the VMs associated with it
    hypvm_dict = {}
    for hypervisor in hypervisor_list:
        hypervisorserver_list = nova.get_hypervisorservers(conn, hypervisor)
        logging.info("VMs on server %s: %s" %
                     (hypervisor, hypervisorserver_list))
        # dict has key hypervisor, e.g. compute-0, value: number of vms
        hypvm_dict[hypervisor] = len(hypervisorserver_list)

    # Get the hostname of the hypervisor with the max VMs
    hypervisor_max = max(hypvm_dict, key=hypvm_dict.get)

    # Lock the host with the most VMs
    logging.info("Test Step 4: Lock the hypervisor with most VMs")
    result = sysinv.lock_host(conn, hypervisor_max)

    # Poll until the host reports locked, with a hard timeout
    # FIXME: move the timeout to constants (preferably get limit from dev)
    hypervisor_state = ""
    i = 0
    while i < 60:
        hypervisor_state = sysinv.get_hostshowvalue(conn,
                                                    hypervisor_max,
                                                    field="administrative")
        if hypervisor_state == "locked":
            logging.info("Hypervisor %s locked as expected" % hypervisor_max)
            break
        i = i + 1
        time.sleep(1)

    # Test fails if we couldn't lock the host with the VMs
    if hypervisor_state != "locked":
        logging.error("Failed to lock %s" % hypervisor_max)
        testFailed_flag = True

    # After the host is locked, ensure that no VMs remain on it
    logging.info(
        "Test Step 5: Ensure the VMs migrated off the locked hypervisor")
    if hypervisor_state == "locked":
        vms_after = nova.get_hypervisorservers(conn, hypervisor_max)
        if len(vms_after) != 0:
            logging.error("Not all VMs migrated off %s" % hypervisor_max)
            # FIX: format args were swapped (VM list printed as hostname)
            logging.error("The following VMs are still on %s: %s" %
                          (hypervisor_max, vms_after))
            testFailed_flag = True

        # FIXME: Check if the VMs that are on another host are available/paused/etc. (not error)

        # Unlock the host at the end of the test
        logging.info("Unlock the host at the end of the test")
        result = sysinv.unlock_host(conn, hypervisor_max)

        # Wait until the host becomes available
        # FIXME: move the timeout to constants
        i = 0
        while i < 180:
            hypervisor_state = sysinv.get_hostshowvalue(conn,
                                                        hypervisor_max,
                                                        field="availability")
            if hypervisor_state == "available":
                logging.info("Hypervisor %s is now unlocked" % hypervisor_max)
                break
            i = i + 1
            time.sleep(1)

    # just for information
    if testFailed_flag:
        logging.info("Test result: FAILED")
    else:
        logging.info("Test result: PASSED")

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    # Return the result so callers can aggregate pass/fail, consistent with
    # the other sanity tests in this file
    return testFailed_flag
# Esempio n. 2
def test_sanityrefresh_coldmigratevms(conn):
    """ This test performs a cold migration of VMs.  If there are no VMs on the
        system, it will launch some.  It will do two cold migrations:

        1.  cold migrate and confirm
        2.  cold migrate and revert

        It will do this for each VM on the system.

        Inputs:
        * conn (string) - ID of pexpect session
        Outputs:
        * testFailed_flag - True if the test fails, False otherwise
    """

    vmlist_virtio = []
    vmlist_avp = []
    vmlist_vswitch = []
    testFailed_flag = False

    test_name = "test_sanityrefresh_cold_migrate_vms"

    # Record the start time so the total duration can be reported at the end
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    logging.info("Test Step 1: source nova openrc")
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    logging.info("Test Step 2: get the IDs associated with tenant users")
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the list of VMs on the system
    logging.info("Test Step 3: Check if there are already VMs on the system")
    vm_list = nova.get_novavms(conn, "name")

    # If there are no VMs on the system, launch some so the test can proceed.
    # FIX: the warning below was indented with a tab (TabError in Python 3)
    if not vm_list:
        logging.warning("There are no VMs present on the system so the test "
                        "will launch some to proceed.")
        vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
        vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 1)
        #vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
        expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch
        vm_list = nova.get_novavms(conn, "name")
        if set(vm_list) != set(expectedvm_list):
            logging.error("Expected the following VMs: %s, instead we have the following: %s" %
                          (expectedvm_list, vm_list))
            logging.info("This means that not all expected VMs were launched.")
            testFailed_flag = True

    logging.info("Test Step 4: Cold migrating instance and then confirming resize")
    vm_list = nova.get_novavms(conn, "id")
    for item in vm_list:
        current_vm_host = nova.get_novashowvalue(conn, item, "host")
        logging.info("VM %s is on host %s" % (item, current_vm_host))
        test_result1 = vm.exec_vm_migrate(conn, item, "cold")

        if not test_result1:
            testFailed_flag = True

    logging.info("Test Step 5: Cold migrating instance and then resize reverting")
    for item in vm_list:
        current_vm_host = nova.get_novashowvalue(conn, item, "host")
        # FIX: this loop previously logged and migrated vm_list[0] on every
        # iteration instead of the current item, so only the first VM was
        # ever cold-migrate-reverted
        logging.info("VM %s is on host %s" % (item, current_vm_host))
        test_result2 = vm.exec_vm_migrate(conn, item, "cold", "revert")

        if not test_result2:
            testFailed_flag = True

    # just for information
    if testFailed_flag:
        logging.info("Test result: FAILED")
    else:
        logging.info("Test result: PASSED")

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    # FIX: the docstring promises this flag as the output, but the original
    # implementation never returned it
    return testFailed_flag
def test_sanityrefresh_pingbetweenvms(conn):
    """ This test performs a ping between the VMs.  If there are no VMs, some are launched
        so the test can continue.  It will ssh into the NAT box and from there, log into
        each VM, and perform the pings.

        Inputs:
        * conn (string) - ID of pexpect session
        Outputs:
        * testFailed_flag - True if the test fails, False otherwise
    """

    vmlist_virtio = []
    vmlist_avp = []
    vmlist_vswitch = []
    testFailed_flag = False

    test_name = "test_sanityrefresh_pingfromnatbox"

    # Record the start time so the total duration can be reported at the end
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    logging.info("Test Step 1: source nova openrc")
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    logging.info("Test Step 2: get the IDs associated with tenant users")
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the list of VMs on the system
    logging.info("Test Step 3: Check if there are already VMs on the system")
    vm_list = nova.get_novavms(conn, "name")

    # If there are no VMs on the system, launch some so the test can proceed
    if not vm_list:
        logging.warning("There are no VMs present on the system so the test "
                        "will launch some to proceed.")
        vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
        vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 1)
        vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
        expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch
        vm_list = nova.get_novavms(conn, "name")
        if set(vm_list) != set(expectedvm_list):
            logging.error(
                "Expected the following VMs: %s, instead we have the following: %s"
                % (expectedvm_list, vm_list))
            logging.info("This means that not all expected VMs were launched.")
            testFailed_flag = True

    logging.info(
        "Test Step 4: Extract VM management IPs, ssh to the VMs and ping the other VMs in the system."
    )
    result = vm.ping_between_vms(conn)

    # A truthy result from ping_between_vms indicates a failure (consistent
    # with how the swact test in this file interprets it)
    if result:
        testFailed_flag = True

    # just for information
    if testFailed_flag:
        logging.info("Test result: FAILED")
    else:
        logging.info("Test result: PASSED")

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    return testFailed_flag
# Esempio n. 4
def test_sanityrefresh_livemigratevms(conn):
    """ This test performs a live migration of VMs.

        Inputs:
        * conn (string) - ID of pexpect session
        Outputs:
        * testFailed_flag - True if the test fails, False otherwise
    """

    vmlist_virtio = []
    vmlist_avp = []
    vmlist_vswitch = []
    testFailed_flag = False

    test_name = "test_sanityrefresh_live_migrate_vms"

    # Record the start time so the total duration can be reported at the end
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    logging.info("Test Step 1: source nova openrc")
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    logging.info("Test Step 2: get the IDs associated with tenant users")
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the list of VMs on the system
    logging.info("Test Step 3: Check if there are already VMs on the system")
    vm_list = nova.get_novavms(conn, "name")

    # If there are no VMs on the system, launch some so the test can proceed.
    # FIX: the warning below was indented with a tab (TabError in Python 3)
    if not vm_list:
        logging.warning("There are no VMs present on the system so the test "
                        "will launch some to proceed.")
        vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
        vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 1)
        #vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
        expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch
        vm_list = nova.get_novavms(conn, "name")
        if set(vm_list) != set(expectedvm_list):
            logging.error("Expected the following VMs: %s, instead we have the following: %s" %
                          (expectedvm_list, vm_list))
            logging.info("This means that not all expected VMs were launched.")
            testFailed_flag = True

    logging.info("Test Step 4: Live migrating without a destination host specified")
    vm_list = nova.get_novavms(conn, "id")
    for item in vm_list:
        test_result1 = vm.exec_vm_migrate(conn, item, "live")
        if not test_result1:
            testFailed_flag = True

    logging.info("Learn what hosts are on the system.")
    hostname_list, cont_hostname_list, comp_hostname_list, stor_hostname_list = sysinv.get_hosts(conn)

    logging.info("Test Step 5: Live migrating with a destination host specified")
    for item in vm_list:
        current_vm_host = nova.get_novashowvalue(conn, item, "host")
        logging.info("VM %s is on host %s" % (item, current_vm_host))
        host_personality = sysinv.get_hostpersonality(conn, current_vm_host)

        # Automatically determine which host to migrate to: pick a random
        # host of the same personality, excluding the current host
        if host_personality == "controller":
            subset_hostname_list = copy.deepcopy(cont_hostname_list)
        else:
            subset_hostname_list = copy.deepcopy(comp_hostname_list)
        # FIX: list.remove() returns None; the original assigned its result
        # to dest_vm_host only to overwrite it on the next line
        subset_hostname_list.remove(current_vm_host)
        dest_vm_host = random.choice(subset_hostname_list)

        logging.info("Live migrating VM %s from %s to %s" % (item, current_vm_host, dest_vm_host))
        test_result2 = vm.exec_vm_migrate(conn, item, "live", dest_vm_host)
        if not test_result2:
            testFailed_flag = True

    # just for information
    # FIX: this summary was previously inside the migration loop, logging a
    # (possibly premature) verdict once per VM; report it once after the loop
    if testFailed_flag:
        logging.info("Test result: FAILED")
    else:
        logging.info("Test result: PASSED")

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    return testFailed_flag
# Esempio n. 5
def test_sanityrefresh_systemsetup(conn):
    """ This test sets up the system for use by sanity: it raises the nova and
        cinder quotas, creates flavors and flavor keys, and launches the
        initial set of VMs.

        Inputs:
        * conn - ID of pexpect session
        Outputs:
        * testFailed_flag - True if test fails, false otherwise
    """

    testFailed_flag = False

    # FIX: test_name and test_start_time were never defined, but are used at
    # the end of the test (NameError)
    test_name = "test_sanityrefresh_systemsetup"
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the nova quota for tenant1
    list_novaquota(conn, tenant1_id)

    # Update quotas so we don't run out during the sanity run
    max_cores = "100"
    max_instances = "100"
    max_ram = "51200"
    put_novaquota(conn, tenant1_id, "cores", max_cores)
    put_novaquota(conn, tenant1_id, "instances", max_instances)
    put_novaquota(conn, tenant1_id, "ram", max_ram)

    # Get the nova quota for tenant1 again to show the updated values
    list_novaquota(conn, tenant1_id)

    # Check that the nova cores quota was actually updated
    result = get_novaquotavalue(conn, tenant1_id, "cores")
    if result == max_cores:
        logging.info("Nova cores have been set correctly to %s" % max_cores)
    else:
        logging.warning(
            "Nova cores not set correctly.  Expected %s, received %s" %
            (max_cores, result))

    # Get the cinder quotas for tenant1
    list_cinderquota(conn, tenant1_id)

    # volumes
    max_volumes = "100"

    # Update the quota for tenant1
    put_cinderquota(conn, tenant1_id, "volumes", max_volumes)

    # FIX: this call was commented out, so the check below compared the
    # stale nova-cores result against max_volumes
    result = get_cinderquotavalue(conn, tenant1_id, "volumes")
    if result == max_volumes:
        logging.info("Cinder volumes have been set correctly to %s" %
                     max_volumes)
    else:
        logging.warning(
            "Cinder volumes not set correctly.  Expected %s, received %s" %
            (max_volumes, result))

    # list existing flavors
    flavorid_list = list_novaflavors(conn)
    logging.info("Extracted flavor IDs: %s" % flavorid_list)

    # create new flavors
    put_bulknovaflavors(conn)

    # create flavor-keys to go with the newly created flavors
    put_bulknovaflavorkeys(conn)

    # try deleting a flavor just for fun
    delete_novaflavor(conn, "fds")

    # Launch VMs via script
    vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
    vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 1)
    vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
    expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch

    # Get an updated list of VMs that have been launched by the system
    vm_list = nova.get_novavms(conn, "name")

    # Check if expected VMs are present
    logging.info(
        "Test will fail if the expected VMs are not present on the system.")
    if vm_list == expectedvm_list:
        logging.info("Test result: PASSED")
    else:
        logging.error("Current VMs %s not equivalent to expected VMs %s" %
                      (vm_list, expectedvm_list))
        logging.info("Test result: FAILED")
        testFailed_flag = True

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    return testFailed_flag
# Esempio n. 6
def test_sanityrefresh_swactcontrollerswithvms(conn):
    """ This test performs a swact of a controller with VMs.

        Inputs:
        * conn (string) - ID of pexpect session
        Outputs:
        * testFailed_flag - True if the test fails, False otherwise
    """

    vmlist_virtio = []
    vmlist_avp = []
    vmlist_vswitch = []
    testFailed_flag = False

    test_name = "test_sanityrefresh_swactcontrollerswithvms"

    # Record the start time so the total duration can be reported at the end
    test_start_time = datetime.datetime.now()
    logging.info("Starting %s at %s" % (test_name, test_start_time))

    # source /etc/nova/openrc
    logging.info("Test Step 1: source nova openrc")
    nova.source_nova(conn)

    # Get the UUIDs for the tenant users we're interested in
    logging.info("Test Step 2: get the IDs associated with tenant users")
    tenant1_id = keystone.get_userid(conn, "tenant1")
    tenant2_id = keystone.get_userid(conn, "tenant2")

    # Get the list of VMs on the system
    logging.info("Test Step 3: Check if there are already VMs on the system")
    vm_list = nova.get_novavms(conn, "name")

    # If there are no VMs on the system, launch some so the test can proceed
    if not vm_list:
        logging.warning("There are no VMs present on the system so the test "
                        "will launch some to proceed.")
        vmlist_virtio = vm.exec_launchvmscript(conn, "tenant1", "virtio", 1)
        vmlist_avp = vm.exec_launchvmscript(conn, "tenant2", "avp", 2)
        vmlist_vswitch = vm.exec_launchvmscript(conn, "tenant1", "vswitch", 1)
        expectedvm_list = vmlist_virtio + vmlist_avp + vmlist_vswitch
        vm_list = nova.get_novavms(conn, "name")
        if set(vm_list) != set(expectedvm_list):
            logging.error(
                "Expected the following VMs: %s, instead we have the following: %s"
                % (expectedvm_list, vm_list))
            logging.info("This means that not all expected VMs were launched.")
            testFailed_flag = True

    # The active controller is the current host
    current_host = sysinv.get_hostname(conn)

    # Get list of VMs per hypervisor
    # FIX: step-number log messages were inconsistent/duplicated; renumbered
    logging.info("Test Step 4: Get hypervisor to VM association")
    hypvm_dict = nova.get_hypervisorvms(conn)

    # Only applies to small footprint
    # In theory, the active controller could have no VMs, so maybe lock/unlock
    # inactive controller

    # Swact the active controller
    logging.info("Test Step 5: Swact the active controller")
    result = sysinv.swact_host(conn, current_host)

    # We'll now get kicked out of ssh. Wait and then reconnect and source openrc
    logging.info("Test Step 6: Wait %s seconds before reconnecting" %
                 SWACT_MAXTIME)
    time.sleep(SWACT_MAXTIME)
    conn = Session(timeout=TIMEOUT)
    conn.connect(hostname=floating_ip, username=USERNAME, password=PASSWORD)
    conn.setecho(ECHO)
    nova.source_nova(conn)

    # Check that we are now on a different host, otherwise test failed
    new_currenthost = sysinv.get_hostname(conn)
    if current_host == new_currenthost:
        logging.error("Swact of host %s failed" % current_host)
        testFailed_flag = True

    # Could check that all nova services are up

    # Check the VMs again to ensure they have not migrated
    new_hypvmdict = nova.get_hypervisorvms(conn)
    if hypvm_dict != new_hypvmdict:
        logging.error("VM list before swact is not equal to VM list after.")
        logging.info("VMs before swact: %s" % hypvm_dict)
        logging.info("VMs after swact: %s" % new_hypvmdict)
        testFailed_flag = True

    # Ping VMs from NAT box
    logging.info("Test Step 7: Ensure we can ping the VMs from the NAT box")
    result = vm.ping_vms_from_natbox(conn, ping_duration=3)
    if result:
        testFailed_flag = True

    # Ping VMs internally
    logging.info("Test Step 8: Ensure we can ping between the VMs")
    result = vm.ping_between_vms(conn, no_packets=3)
    if result:
        testFailed_flag = True

    # just for information
    if testFailed_flag:
        logging.info("Test result: FAILED")
    else:
        logging.info("Test result: PASSED")

    # Test end time
    test_end_time = datetime.datetime.now()
    test_duration = test_end_time - test_start_time
    logging.info("Ending %s at %s" % (test_name, test_end_time))
    logging.info("Test ran for %s" % test_duration)

    return testFailed_flag