Example 1
def add_route_for_vm_access(compliance_client):
    """
    Add ip route on compliance test node to access vm from it
    Args:
        compliance_client:

    Returns:

    """
    LOG.fixture_step(
        "Add routes to access VM from compliance server if not already done")
    cidrs = network_helper.get_subnets(
        name="tenant[1|2].*-mgmt0-subnet0|external-subnet0",
        regex=True,
        field='cidr',
        auth_info=Tenant.get('admin'))
    cidrs_to_add = [
        # Normalize each subnet CIDR to its enclosing /24 network
        '{}.0/24'.format(re.findall(r'(.*)\.\d+/\d+', item)[0])
        for item in cidrs
    ]
    for cidr in cidrs_to_add:
        if compliance_client.exec_cmd(
                'ip route | grep "{}"'.format(cidr))[0] != 0:
            compliance_client.exec_sudo_cmd('ip route add {} via {}'.format(
                cidr, VM_ROUTE_VIA))
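
The list comprehension above collapses each subnet CIDR to its enclosing /24 network before adding the route. A minimal standalone check of that normalization (the sample addresses are illustrative only):

import re

def to_slash_24(cidr):
    """Collapse a CIDR such as '192.168.101.32/27' to '192.168.101.0/24'."""
    prefix = re.findall(r'(.*)\.\d+/\d+', cidr)[0]  # e.g. '192.168.101'
    return '{}.0/24'.format(prefix)

assert to_slash_24('192.168.101.32/27') == '192.168.101.0/24'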
Example 2
def test_ceilometer_meters_exist(meters):
    """
    Validate ceilometer meters exist
    Verification Steps:
    1. Check via 'openstack metric list' or 'ceilometer event-list'
    2. Check meters for router, subnet, image, and vswitch exists
    """
    # skip('CGTS-10102: Disable TC until US116020 completes')
    time_create = system_helper.get_host_values('controller-1',
                                                'created_at')[0]
    current_isotime = datetime.utcnow().isoformat(sep='T')

    if common.get_timedelta_for_isotimes(
            time_create, current_isotime) > timedelta(hours=24):
        skip("Over a day since install. Meters no longer exist.")

    # Check meter for routers
    LOG.tc_step(
        "Check number of 'router.create.end' events is at least the number of existing routers"
    )
    routers = network_helper.get_routers()
    router_id = routers[0]
    check_event_in_tenant_or_admin(resource_id=router_id,
                                   event_type='router.create.end')

    # Check meter for subnets
    LOG.tc_step(
        "Check number of 'subnet.create' meters is at least the number of existing subnets"
    )
    subnets = network_helper.get_subnets(
        name=Tenant.get_primary().get('tenant'), strict=False)
    subnet = random.choice(subnets)
    LOG.info("Subnet to check in ceilometer event list: {}".format(subnet))
    check_event_in_tenant_or_admin(resource_id=subnet,
                                   event_type='subnet.create.end')

    # Check meter for image
    LOG.tc_step('Check meters for image')
    images = glance_helper.get_images(field='id')
    resource_ids = gnocchi_helper.get_metrics(metric_name='image.size',
                                              field='resource_id')
    assert set(images) <= set(resource_ids)

    # Check meter for vswitch
    LOG.tc_step('Check meters for vswitch')
    resource_ids = gnocchi_helper.get_metrics(
        metric_name='vswitch.engine.util', fail_ok=True, field='resource_id')
    if system_helper.is_avs():
        hypervisors = host_helper.get_hypervisors()
        assert len(hypervisors) <= len(resource_ids), \
            "Each nova hypervisor should have at least one vSwitch core"
    else:
        assert not resource_ids, "vswitch meters found on a non-AVS build"
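
common.get_timedelta_for_isotimes belongs to the surrounding test framework and is not shown here. A minimal stdlib-only sketch of the comparison it performs for the over-a-day skip above (the exact parsing rules of the real helper are an assumption):

from datetime import datetime, timedelta

def get_timedelta_for_isotimes(time_1, time_2):
    """Return time_2 - time_1 for two ISO-8601 timestamp strings."""
    t1 = datetime.fromisoformat(time_1.replace('Z', '+00:00'))
    t2 = datetime.fromisoformat(time_2.replace('Z', '+00:00'))
    return t2 - t1

# The skip condition from the test above
assert get_timedelta_for_isotimes(
    '2023-01-01T00:00:00', '2023-01-02T06:00:00') > timedelta(hours=24)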
Example 3
    def teardown():
        """
        If DNS servers are not set, set them.  Deprovision internal DNS.
        """
        global UNRESTORED_DNS_SERVERS
        global HOSTS_AFFECTED

        if UNRESTORED_DNS_SERVERS:
            LOG.fixture_step("Restoring DNS entries to: {}".format(UNRESTORED_DNS_SERVERS))
            subnet_list = network_helper.get_subnets(network=mgmt_net_id)
            set_dns_servers(subnet_list, UNRESTORED_DNS_SERVERS, fail_ok=True)
            UNRESTORED_DNS_SERVERS = []

        if system_helper.get_alarms(alarm_id=EventLogID.CONFIG_OUT_OF_DATE):
            LOG.fixture_step("Config out-of-date alarm(s) present, check {} and lock/unlock if host config out-of-date".
                             format(HOSTS_AFFECTED))
            for host in list(HOSTS_AFFECTED):
                if system_helper.get_host_values(host, 'config_status')[0] == 'Config out-of-date':
                    LOG.info("Lock/unlock {} to clear config out-of-date status".format(host))
                    host_helper.lock_unlock_hosts(hosts=host)
                # Iterate over a copy so that removal does not skip hosts
                HOSTS_AFFECTED.remove(host)
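
Removing items from a list while iterating over it skips elements, which is why the loop above iterates over a copy. A self-contained illustration:

hosts = ['compute-0', 'compute-1', 'compute-2']
for h in hosts:
    hosts.remove(h)
print(hosts)  # ['compute-1'] -- 'compute-1' was skipped, not removed

hosts = ['compute-0', 'compute-1', 'compute-2']
for h in list(hosts):  # iterate over a snapshot instead
    hosts.remove(h)
print(hosts)  # []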
Example 4
def verify_heat_resource(to_verify=None,
                         template_name=None,
                         stack_name=None,
                         auth_info=None,
                         fail_ok=False):
    """
        Verify the heat resource creation/deletion for given resources

        Args:
            to_verify (list): Resources to verify creation or deletion.
            template_name (str): template to be used to create heat stack.
            stack_name(str): stack name used to create the stack
            auth_info
            fail_ok

        Returns (int): return 0 if success 1 if failure

    """
    LOG.info("Verifying heat resource {}".format(to_verify))

    rtn_code = 0
    msg = "Heat resource {} appeared".format(to_verify)
    item_verified = to_verify

    if to_verify == 'volume':
        LOG.info("Verifying volume")
        vol_name = getattr(Heat, template_name)['vol_name']
        resource_found = cinder_helper.get_volumes(name=vol_name)

    elif to_verify == 'ceilometer_alarm':
        resource_found = ceilometer_helper.get_alarms(name=stack_name,
                                                      strict=False)

    elif to_verify == 'neutron_port':
        port_name = getattr(Heat, template_name)['port_name']
        if port_name is None:
            port_name = stack_name
        resource_found = network_helper.get_ports(port_name=port_name)

    elif to_verify == 'neutron_provider_net_range':
        resource_found = network_helper.get_network_segment_ranges(
            field='name', physical_network='sample_physnet_X')

    elif to_verify == 'nova_server_group':
        resource_found = nova_helper.get_server_groups(name=stack_name)

    elif to_verify == 'vm':
        vm_name = getattr(Heat, template_name)['vm_name']
        resource_found = vm_helper.get_vms(vms=vm_name, strict=False)

    elif to_verify == 'nova_flavor':
        resource_found = nova_helper.get_flavors(name='sample-flavor')

    elif to_verify == 'neutron_net':
        resource_found = network_helper.get_tenant_net_id(
            net_name='sample-net')

    elif to_verify == 'image':
        resource_found = glance_helper.get_image_id_from_name(
            name='sample_image')

    elif to_verify == 'subnet':
        resource_found = network_helper.get_subnets(name='sample_subnet')

    elif to_verify == 'floating_ip':
        resource_found = network_helper.get_floating_ips()

    elif to_verify == 'router':
        resource_found = network_helper.get_tenant_router(
            router_name='sample_router', auth_info=auth_info)

    elif to_verify == 'router_gateway':
        item_verified = 'sample_gateway_router'
        resource_found = network_helper.get_tenant_router(
            router_name='sample_gateway_router', auth_info=auth_info)
        if resource_found:
            item_verified = to_verify
            resource_found = network_helper.get_router_ext_gateway_info(
                router_id=resource_found, auth_info=auth_info)

    elif to_verify == 'router_interface':
        item_verified = 'sample_if_router'
        router_id = network_helper.get_tenant_router(
            router_name='sample_if_router', auth_info=auth_info)
        resource_found = router_id
        if resource_found:
            item_verified = 'sample_if_subnet'
            subnets = network_helper.get_subnets(name='sample_if_subnet',
                                                 auth_info=auth_info)
            resource_found = subnets
            if resource_found:
                item_verified = to_verify
                router_subnets = network_helper.get_router_subnets(
                    router=router_id, auth_info=auth_info)
                resource_found = resource_found[0] in router_subnets

    elif to_verify == 'security_group':
        resource_found = network_helper.get_security_groups(
            name='SecurityGroupDeluxe')
    elif to_verify == 'key_pair':
        kp_name = getattr(Heat, template_name)['key_pair_name']
        resource_found = nova_helper.get_keypairs(name=kp_name)
    elif to_verify == 'neutron_qos':
        resource_found = network_helper.get_qos_policies(name='SampleQoS',
                                                         auth_info=auth_info)
    else:
        raise ValueError("Unknown item to verify: {}".format(to_verify))

    if not resource_found:
        msg = "Heat stack {} resource {} does not exist".format(
            stack_name, item_verified)
        if fail_ok:
            rtn_code = 1
        else:
            assert resource_found, msg

    LOG.info(msg)
    return rtn_code, msg
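
Note that these comparisons must use == rather than is: an identity check on strings only passes when CPython happens to intern both operands, and CPython 3.8+ emits a SyntaxWarning for literal comparisons with is. A quick demonstration:

a = 'router_gateway'
b = '_'.join(['router', 'gateway'])  # equal value, distinct object
print(a == b)  # True  -- value comparison, always correct
print(a is b)  # False -- identity comparison, depends on interning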
Example 5
def test_ping_between_vms_using_hostnames(func_recover):
    """
    This test includes a positive test and a negative test.

    Positive Test:
    Verify that VMs can interact using hostnames after internal DNS is set up.

    Negative Test:
    Verify VMs can no longer interact with each other using hostnames after
    disabling internal DNS.

    Args:
        - Nothing

    Setup:
        - Nothing

    Test Steps:
        - Delete existing VMs and volumes
        - Provision internal DNS name resolution
        - Query DNS entries for subnet and store
        - If DNS entries are not present, set them to a default value
        - Delete DNS servers for desired subnet
        - Launch two VMs in the same network
        - Log into the guests and ping the other VM
        - Restore DNS entries for subnet
        - Delete VMs and volumes created during test
        - Disable internal DNS name resolution
        - Launch two new VMs in the same network
        - Log into the guest and ping the other VM (should fail)
        - Delete VMs and volumes created during test

    Returns:
        - Nothing

    Teardown:
        - Check the DNS Server entries
        - If not set, restore to original values

    """

    mgmt_net_id = func_recover
    subnet_list = network_helper.get_subnets(network=mgmt_net_id)

    LOG.tc_step("Store existing DNS entries so they can be restored later")
    dns_servers = network_helper.get_subnet_values(
        subnet_list[0], fields='dns_nameservers')[0].split(', ')
    # ''.split(', ') yields [''], so drop empty entries before the check
    dns_servers = [server for server in dns_servers if server]
    if not dns_servers:
        LOG.tc_step("No DNS servers found. Setting DNS servers to defaults")
        dns_servers = DEFAULT_DNS_SERVERS
        set_dns_servers(subnet_list, dns_servers)

    global UNRESTORED_DNS_SERVERS
    UNRESTORED_DNS_SERVERS = dns_servers
    global HOSTS_AFFECTED
    hosts = host_helper.get_hypervisors()
    HOSTS_AFFECTED = hosts

    LOG.tc_step("Enabling internal dns resolution")
    provision_internal_dns(hosts=hosts)
    HOSTS_AFFECTED = []

    LOG.tc_step("Modify DNS entries for each subnet in the network")
    subnet_list = network_helper.get_subnets(network=mgmt_net_id)
    set_dns_servers(subnet_list)

    LOG.tc_step("Launch two VMs using the same network")
    nics = [{"net-id": mgmt_net_id}]
    vm1_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm2_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm1_name = vm_helper.get_vm_name_from_id(vm1_id)
    vm2_name = vm_helper.get_vm_name_from_id(vm2_id)

    LOG.tc_step("Log into each VM and ping the other VM using the hostname")
    cmd = "ping -c 3 {}".format(vm2_name)
    with vm_helper.ssh_to_vm_from_natbox(vm1_id) as vm_ssh:
        vm_ssh.exec_cmd(cmd, fail_ok=False)
    cmd = "ping -c 3 {}".format(vm1_name)
    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm_ssh:
        vm_ssh.exec_cmd(cmd, fail_ok=False)

    LOG.tc_step("Restore DNS entries for each subnet in the network")
    set_dns_servers(subnet_list, dns_servers)
    UNRESTORED_DNS_SERVERS = []

    LOG.tc_step("Cleanup VMs")
    vm_helper.delete_vms()

    LOG.tc_step("Disabling internal dns resolution")
    HOSTS_AFFECTED = hosts
    deprovision_internal_dns(hosts=hosts)
    HOSTS_AFFECTED = []

    LOG.tc_step("Launch two VMs using the same network")
    vm1_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm2_id = vm_helper.boot_vm(nics=nics, cleanup='function')[1]
    vm1_name = vm_helper.get_vm_name_from_id(vm1_id)
    vm2_name = vm_helper.get_vm_name_from_id(vm2_id)

    LOG.tc_step("Log into each VM and ping the other VM using the hostname")
    cmd = "ping -c 3 {}".format(vm2_name)
    with vm_helper.ssh_to_vm_from_natbox(vm1_id) as vm_ssh:
        # ping exits with code 2 when the hostname cannot be resolved
        rc, out = vm_ssh.exec_cmd(cmd, fail_ok=True)
        assert rc == 2, out
    cmd = "ping -c 3 {}".format(vm1_name)
    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm_ssh:
        rc, out = vm_ssh.exec_cmd(cmd, fail_ok=True)
        assert rc == 2, out

    LOG.tc_step("Cleanup VMs")
    vm_helper.delete_vms()
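
The dns_nameservers parsing earlier in this test filters out empty strings because str.split on an empty string never returns an empty list. A self-contained illustration of the pitfall:

raw = ''  # subnet with no DNS servers configured
servers = raw.split(', ')
print(servers)  # [''] -- truthy, so `if not servers` would never fire
servers = [s for s in servers if s]
print(servers)  # []   -- now the emptiness check behaves as intended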