Example 1
def pci_prep():
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    primary_tenant_name = primary_tenant['tenant']
    vm_helper.set_quotas(tenant=primary_tenant_name, cores=100)
    vm_helper.set_quotas(tenant=other_tenant['tenant'], cores=100)
    return primary_tenant, primary_tenant_name, other_tenant
Example 2
def test_horizon_non_bootable_volume_launch_as_instance_negative(volumes_pg_action):
    """
    This test case checks that the launch-as-instance option fails for a non-bootable volume:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Instances
        - Create a non-bootable volume

    Teardown:
        - Back to Instances page
        - Delete the newly created volume
        - Logout

    Test Steps:
        - Launch volume as instance
        - Check that a ValueError exception is raised
    """
    volumes_pg_action, volume_name = volumes_pg_action
    instance_name = helper.gen_resource_name('volume_instance')
    LOG.tc_step('Verify an error is raised when launching non-bootable volume {} as instance'.format(volume_name))
    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flavor_name = nova_helper.get_basic_flavor(rtn_id=False)

    with raises(ValueError):
        volumes_pg_action.launch_as_instance(volume_name, instance_name, delete_volume_on_instance_delete=True,
                                             flavor_name=flavor_name, network_names=[mgmt_net_name])
    horizon.test_result = True
Example 3
def get_user_data_file():
    """
    This function is a workaround to adds user_data  for restarting the sshd. The
    sshd daemon fails to start in VM evacuation testcase.


    Returns:(str) - the file path of the userdata text file

    """

    auth_info = Tenant.get_primary()
    tenant = auth_info['tenant']
    user_data_file = "{}/userdata/{}_test_userdata.txt".format(
        ProjVar.get_var('USER_FILE_DIR'), tenant)
    client = get_cli_client()
    cmd = "test -e {}".format(user_data_file)
    rc = client.exec_cmd(cmd)[0]
    if rc != 0:
        cmd = "cat <<EOF > {}\n" \
              "#cloud-config\n\nruncmd: \n - /etc/init.d/sshd restart\n" \
              "EOF".format(user_data_file)
        LOG.info(cmd)
        code, output = client.exec_cmd(cmd)
        LOG.info("Code: {} output: {}".format(code, output))

    return user_data_file
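For reference, the heredoc above writes the following cloud-config userdata file (reconstructed directly from the format string in cmd):

#cloud-config

runcmd:
 - /etc/init.d/sshd restart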
Example 4
def test_horizon_create_delete_server_group(server_groups_pg, policy):
    """
    Tests the server group creation and deletion functionality:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Server Groups page

    Teardown:
        - Go to instance page
        - Delete the created instance
        - Go to server group page
        - Delete the created group server
        - Logout

    Test Steps:
        - Create a new server group
        - Verify the group appears in server groups table
        - Launch instance with newly created server group
        - Verify the instance status is active
    """
    server_groups_pg, group_name, instance_name = server_groups_pg

    LOG.tc_step('Create a new server group')
    server_groups_pg.create_server_group(name=group_name,
                                         policy='string:' + policy)
    assert not server_groups_pg.find_message_and_dismiss(messages.ERROR), \
        '{} creation error'.format(group_name)

    LOG.tc_step('Verify the group appears in server groups table')
    assert server_groups_pg.is_server_group_present(group_name)

    LOG.tc_step('Launch instance with newly created server group')
    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flavor_name = nova_helper.get_basic_flavor(rtn_id=False)
    guest_img = GuestImages.DEFAULT['guest']
    instances_pg = instancespage.InstancesPage(server_groups_pg.driver,
                                               port=server_groups_pg.port)
    instances_pg.go_to_target_page()
    instances_pg.create_instance(instance_name,
                                 source_name=guest_img,
                                 flavor_name=flavor_name,
                                 network_names=[mgmt_net_name],
                                 server_group_name=group_name)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR), \
        'instance: {} creation error'.format(instance_name)

    LOG.tc_step('Verify the instance status is active')
    assert instances_pg.is_instance_active(instance_name), \
        'instance: {} status is not active'.format(instance_name)

    horizon.test_result = True
Example 5
def test_ceilometer_meters_exist(meters):
    """
    Validate ceilometer meters exist
    Verification Steps:
    1. Check via 'openstack metric list' or 'ceilometer event-list'
    2. Check that meters for router, subnet, image, and vswitch exist
    """
    # skip('CGTS-10102: Disable TC until US116020 completes')
    time_create = system_helper.get_host_values('controller-1',
                                                'created_at')[0]
    current_isotime = datetime.utcnow().isoformat(sep='T')

    if common.get_timedelta_for_isotimes(
            time_create, current_isotime) > timedelta(hours=24):
        skip("Over a day since install. Meters no longer exist.")

    # Check meter for routers
    LOG.tc_step(
        "Check number of 'router.create.end' events is at least the number of existing routers"
    )
    routers = network_helper.get_routers()
    router_id = routers[0]
    check_event_in_tenant_or_admin(resource_id=router_id,
                                   event_type='router.create.end')

    # Check meter for subnets
    LOG.tc_step(
        "Check number of 'subnet.create' meters is at least the number of existing subnets"
    )
    subnets = network_helper.get_subnets(
        name=Tenant.get_primary().get('tenant'), strict=False)
    subnet = random.choice(subnets)
    LOG.info("Subnet to check in ceilometer event list: {}".format(subnet))
    check_event_in_tenant_or_admin(resource_id=subnet,
                                   event_type='subnet.create.end')

    # Check meter for image
    LOG.tc_step('Check meters for image')
    images = glance_helper.get_images(field='id')
    resource_ids = gnocchi_helper.get_metrics(metric_name='image.size',
                                              field='resource_id')
    assert set(images) <= set(resource_ids)

    # Check meter for vswitch
    LOG.tc_step('Check meters for vswitch')
    resource_ids = gnocchi_helper.get_metrics(
        metric_name='vswitch.engine.util', fail_ok=True, field='resource_id')
    if system_helper.is_avs():
        hypervisors = host_helper.get_hypervisors()
        assert len(hypervisors) <= len(resource_ids), \
            "Each nova hypervisor should have at least one vSwitch core"
    else:
        assert not resource_ids, "vswitch meters found for STX build"
Example 6
def test_horizon_volume_launch_as_instance(volumes_pg_action):
    """
    This test case checks launch volume as instance functionality:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Instances
        - Create a new volume

    Teardown:
        - Back to Instances page
        - Delete the newly created volume
        - Logout

    Test Steps:
        - Edit new volume as bootable
        - Launch volume as instance
        - Check that instance is 'active' and attached to the volume
        - Check that volume status is 'in use'
        - Delete the instance
    """
    volumes_pg_action, volume_name = volumes_pg_action
    LOG.tc_step('Edit new volume as Bootable')
    volumes_pg_action.edit_volume(volume_name, volume_name, bootable=True)
    instance_name = helper.gen_resource_name('volume_instance')

    LOG.tc_step('Launch volume {} as instance'.format(volume_name))
    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flavor_name = nova_helper.get_basic_flavor(rtn_id=False)
    volumes_pg_action.launch_as_instance(volume_name,
                                         instance_name,
                                         delete_volume_on_instance_delete=False,
                                         flavor_name=flavor_name,
                                         network_names=[mgmt_net_name])
    LOG.tc_step('Check that instance is Active and attached to the volume')
    time.sleep(5)
    instances_pg = instancespage.InstancesPage(volumes_pg_action.driver, volumes_pg_action.port)
    instances_pg.go_to_target_page()
    assert instances_pg.is_instance_active(instance_name)
    volumes_pg_action.go_to_target_page()
    assert instance_name in volumes_pg_action.get_volume_info(volume_name, "Attached To")

    LOG.tc_step('Check that volume status is In-use')
    assert volumes_pg_action.is_volume_status(volume_name, 'In-use')

    LOG.tc_step('Delete the instance')
    instances_pg.go_to_target_page()
    instances_pg.delete_instance(instance_name)
    assert instances_pg.find_message_and_dismiss(messages.INFO)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_deleted(instance_name)
    horizon.test_result = True
Example 7
def test_horizon_launch_instance_from_image(tenant_images_pg):
    """
    Test launch instance from image functionality:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Image

    Teardown:
        - Back to Images page
        - Logout

    Test Steps:
        - Create a new image
        - Launch new instance from image
        - Check that status of newly created instance is Active
        - Delete the newly launched instance
        - Delete the newly created image
    """
    images_pg, image_name, image_id = tenant_images_pg

    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flv_name = nova_helper.get_basic_flavor(rtn_id=False)

    images_pg.refresh_page()
    assert images_pg.is_image_active(image_name)

    instance_name = helper.gen_resource_name('image_instance')
    LOG.tc_step('Launch new instance {} from image.'.format(instance_name))
    images_pg.launch_instance_from_image(image_name,
                                         instance_name,
                                         flavor_name=flv_name,
                                         network_names=[mgmt_net_name],
                                         create_new_volume=False)
    assert not images_pg.find_message_and_dismiss(messages.ERROR)
    instance_pg = instancespage.InstancesPage(images_pg.driver,
                                              port=images_pg.port)
    instance_pg.go_to_target_page()
    assert not instance_pg.find_message_and_dismiss(messages.ERROR)

    LOG.tc_step('Check that status of newly created instance is Active.')
    instance_pg.refresh_page()
    assert instance_pg.is_instance_active(instance_name)

    LOG.tc_step('Delete instance {}.'.format(instance_name))
    instance_pg.delete_instance_by_row(instance_name)
    assert not instance_pg.find_message_and_dismiss(messages.ERROR)
    assert instance_pg.is_instance_deleted(instance_name)

    horizon.test_result = True
Example 8
def get_tenant_name(auth_info=None):
    """
    Get name of given tenant. If None is given, primary tenant name will be
    returned.

    Args:
        auth_info (dict|None): Tenant dict

    Returns:
        str: name of the tenant

    """
    if auth_info is None:
        auth_info = Tenant.get_primary()
    return auth_info['tenant']
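A minimal usage sketch; Tenant.get_secondary() appears elsewhere in these examples, and the returned value is just the tenant name string:

# With no argument the primary tenant's name is returned
name = get_tenant_name()
# An explicit auth dict works the same way, e.g. the secondary tenant
name = get_tenant_name(auth_info=Tenant.get_secondary())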
Example 9
def wait_for_image_sync_on_subcloud(image_id, timeout=1000, delete=False):
    if ProjVar.get_var('IS_DC'):
        if dc_helper.get_subclouds(
                field='management',
                name=ProjVar.get_var('PRIMARY_SUBCLOUD'))[0] == 'managed':
            auth_info = Tenant.get_primary()
            if delete:
                _wait_for_images_deleted(images=image_id,
                                         auth_info=auth_info,
                                         fail_ok=False,
                                         timeout=timeout)
            else:
                wait_for_image_appear(image_id,
                                      auth_info=auth_info,
                                      timeout=timeout)
Example 10
def test_horizon_allocate_floating_ip_admin(floating_ips_pg_admin):
    LOG.tc_step('Allocates floating ip')
    floating_ip = floating_ips_pg_admin.allocate_floatingip(tenant=Tenant.get_primary()['tenant'])

    LOG.tc_step('Verifies that the floating ip {} is present'.format(floating_ip))
    assert floating_ips_pg_admin.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_pg_admin.find_message_and_dismiss(messages.ERROR)
    assert floating_ips_pg_admin.is_floatingip_present(floating_ip)

    LOG.tc_step('Releases the floating ip')
    floating_ips_pg_admin.release_floatingip(floating_ip)
    assert floating_ips_pg_admin.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_pg_admin.find_message_and_dismiss(messages.ERROR)
    LOG.tc_step('Verifies that the floating ip does not appear in the table')
    assert not floating_ips_pg_admin.is_floatingip_present(floating_ip)
    horizon.test_result = True
Example 11
def test_horizon_network_subnet_create_admin(get_pnet, admin_networks_pg):
    """
    Test the network creation and deletion functionality:

    Setups:
        - Login as Admin
        - Go to Admin > Network > Networks

    Teardown:
        - Back to Networks page
        - Logout

    Test Steps:
        - Create a new network with subnet
        - Verify the network appears in the networks table as active
        - Delete the newly created network
        - Verify the network does not appear in the table after deletion
    """
    pnet_name, pnet_type = get_pnet
    network_name = helper.gen_resource_name('network')
    subnet_name = helper.gen_resource_name('subnet')

    LOG.tc_step('Create new network {}.'.format(network_name))
    admin_networks_pg.create_network(network_name,
                                     project=Tenant.get_primary()['tenant'],
                                     provider_network_type=pnet_type,
                                     physical_network=pnet_name,
                                     segmentation_id=300,
                                     subnet_name=subnet_name,
                                     network_address='192.168.0.0/24')
    assert admin_networks_pg.find_message_and_dismiss(messages.SUCCESS)
    assert not admin_networks_pg.find_message_and_dismiss(messages.ERROR)

    LOG.tc_step('Verify the network appears in the networks table as active')
    assert admin_networks_pg.is_network_present(network_name)
    assert admin_networks_pg.get_network_info(network_name,
                                              'Status') == 'Active'

    LOG.tc_step('Delete network {}.'.format(network_name))
    admin_networks_pg.delete_network_by_row(network_name)
    assert admin_networks_pg.find_message_and_dismiss(messages.SUCCESS)
    assert not admin_networks_pg.find_message_and_dismiss(messages.ERROR)

    LOG.tc_step(
        'Verify the network does not appear in the table after deletion')
    assert not admin_networks_pg.is_network_present(network_name)
    horizon.test_result = True
Example 12
def snat_setups(request):
    find_dvr = 'True' if request.param == 'distributed' else 'False'

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info in [primary_tenant, other_tenant]:
        tenant_router = network_helper.get_tenant_router(auth_info=auth_info)
        is_dvr_router = network_helper.get_router_values(router_id=tenant_router,
                                                         fields='distributed')[0]
        if find_dvr == str(is_dvr_router):
            LOG.fixture_step("Setting primary tenant to {}".format(common.get_tenant_name(auth_info)))
            Tenant.set_primary(auth_info)
            break
    else:
        skip("No {} router found on system.".format(request.param))

    LOG.fixture_step("Update router to enable SNAT")
    network_helper.set_router_gateway(enable_snat=True)     # Check snat is handled by the keyword

    def disable_snat():
        LOG.fixture_step("Disable SNAT on tenant router")
        try:
            network_helper.set_router_gateway(enable_snat=False)
        finally:
            LOG.fixture_step("Revert primary tenant to {}".format(primary_tenant['tenant']))
            Tenant.set_primary(primary_tenant)
    request.addfinalizer(disable_snat)

    LOG.fixture_step("Boot a VM from volume")
    vm_id = vm_helper.boot_vm(name='snat', reuse_vol=False, cleanup='module')[1]

    if system_helper.is_avs():
        LOG.fixture_step("Attempt to ping from NatBox and ensure if fails")
        ping_res = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, fail_ok=True, use_fip=False)
        assert ping_res is False, "VM can still be ping'd from outside after SNAT enabled without floating ip."

    LOG.fixture_step("Create a floating ip and associate it to VM")
    floatingip = network_helper.create_floating_ip(cleanup='module')[1]
    network_helper.associate_floating_ip_to_vm(floatingip, vm_id)

    LOG.fixture_step("Ping vm's floating ip from NatBox and ensure it's reachable")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=60, use_fip=True)

    return vm_id, floatingip
Example 13
def test_horizon_create_delete_instance(instances_pg):
    """
    Test the instance creation and deletion functionality:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Instance

    Teardown:
        - Back to Instances page
        - Logout

    Test Steps:
        - Create a new instance
        - Verify the instance appears in the instances table as active
        - Delete the newly launched instance
        - Verify the instance does not appear in the table after deletion
    """
    instances_pg, instance_name = instances_pg

    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flavor_name = nova_helper.get_basic_flavor(rtn_id=False)
    guest_img = GuestImages.DEFAULT['guest']

    LOG.tc_step('Create new instance {}'.format(instance_name))
    instances_pg.create_instance(instance_name,
                                 boot_source_type='Image',
                                 source_name=guest_img,
                                 flavor_name=flavor_name,
                                 network_names=[mgmt_net_name],
                                 create_new_volume=False)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)

    LOG.tc_step('Verify the instance appears in the instances table as active')
    assert instances_pg.is_instance_active(instance_name)

    LOG.tc_step('Delete instance {}'.format(instance_name))
    instances_pg.delete_instance_by_row(instance_name)
    assert instances_pg.find_message_and_dismiss(messages.INFO)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)

    LOG.tc_step(
        'Verify the instance does not appear in the table after deletion')
    assert instances_pg.is_instance_deleted(instance_name)
    horizon.test_result = True
Example 14
def set_current_user_password(original_password, new_password, fail_ok=False,
                              auth_info=None, con_ssh=None):
    """
    Set password for current user
    Args:
        original_password:
        new_password:
        fail_ok:
        auth_info:
        con_ssh:

    Returns (tuple):

    """
    args = "--password '{}' --original-password '{}'".format(new_password, original_password)
    code, output = cli.openstack('user password set', args, ssh_client=con_ssh,
                                 auth_info=auth_info, fail_ok=fail_ok)
    if code > 0:
        return 1, output

    if not auth_info:
        auth_info = Tenant.get_primary()

    user = auth_info['user']
    tenant_dictname = user
    if auth_info.get('platform'):
        tenant_dictname += '_platform'
    Tenant.update(tenant_dictname, password=new_password)

    if user == 'admin':
        from consts.proj_vars import ProjVar
        if ProjVar.get_var('REGION') != 'RegionOne':
            LOG.info(
                "Run openstack_update_admin_password on secondary region "
                "after admin password change")
            if not con_ssh:
                con_ssh = ControllerClient.get_active_controller()
            with con_ssh.login_as_root(timeout=30) as con_ssh:
                con_ssh.exec_cmd(
                    "echo 'y' | openstack_update_admin_password '{}'".format(new_password))

    msg = 'User {} password successfully updated from {} to {}'.format(user, original_password,
                                                                       new_password)
    LOG.info(msg)
    return 0, output
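A hedged usage sketch; the password values are placeholders, and with fail_ok=True a CLI failure returns (1, output) instead of raising:

# Hypothetical rotation of the current user's password
code, output = set_current_user_password('OldPass_1', 'NewPass_2', fail_ok=True)
assert code == 0, output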
Example 15
def tenant_images_pg(tenant_home_pg_container, request):
    LOG.fixture_step('Go to Project > Compute > Images')
    image_name = helper.gen_resource_name('image')
    images_pg = imagespage.ImagesPage(tenant_home_pg_container.driver,
                                      port=tenant_home_pg_container.port)
    images_pg.go_to_target_page()
    LOG.fixture_step('Create new image {}'.format(image_name))
    image_id = glance_helper.create_image(image_name,
                                          auth_info=Tenant.get_primary())[1]
    image_name = glance_helper.get_image_values(image_id, 'Name')[0]

    def teardown():
        LOG.fixture_step('Back to Images page')
        images_pg.go_to_target_page()
        LOG.fixture_step('Delete image {}'.format(image_name))
        images_pg.delete_image(image_name)

    request.addfinalizer(teardown)
    return images_pg, image_name, image_id
Example 16
def add_admin_role(request):

    if not system_helper.is_avs():
        skip("vshell commands unsupported by OVS")

    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()
    tenants = [primary_tenant, other_tenant]
    res = []
    for auth_info in tenants:
        code = keystone_helper.add_or_remove_role(add_=True, role='admin', user=auth_info.get('user'),
                                                  project=auth_info.get('tenant'))[0]
        res.append(code)

    def remove_admin_role():
        for i in range(len(res)):
            if res[i] != -1:
                auth_info_ = tenants[i]
                keystone_helper.add_or_remove_role(add_=False, role='admin', user=auth_info_.get('user'),
                                                   project=auth_info_.get('tenant'))

    request.addfinalizer(remove_admin_role)
Example 17
def __login_base(request, driver, auth_info, port=None):

    horizon.test_result = False
    if not auth_info:
        auth_info = Tenant.get_primary()

    user = auth_info['user']
    password = auth_info['password']
    project = auth_info['tenant']
    if not port and not auth_info.get('platform'):
        port = 31000

    gmttime = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
    video_path = ProjVar.get_var('LOG_DIR') + '/horizon/' + \
        str(gmttime) + '.mp4'
    recorder = video_recorder.VideoRecorder(1920, 1080, os.environ['DISPLAY'],
                                            video_path)
    recorder.start()
    home_pg = None

    try:
        LOG.fixture_step('Login as {}'.format(user))
        login_pg = loginpage.LoginPage(driver, port=port)
        login_pg.go_to_target_page()
        home_pg = login_pg.login(user=user, password=password)
        home_pg.change_project(name=project)
    finally:

        def teardown():
            if home_pg:
                LOG.fixture_step('Logout')
                home_pg.log_out()
            recorder.stop()
            if horizon.test_result:
                recorder.clear()

        request.addfinalizer(teardown)

    return home_pg
Example 18
def pre_alarms_session():
    if container_helper.is_stx_openstack_deployed():
        from keywords import network_helper
        for auth_info in (Tenant.get_primary(), Tenant.get_secondary()):
            project = auth_info['tenant']
            default_group = network_helper.get_security_groups(
                auth_info=auth_info, name='default', strict=True)
            if not default_group:
                LOG.info(
                    "No default security group for {}. Skip security group "
                    "rule config.".format(project))
                continue

            default_group = default_group[0]
            security_rules = network_helper.get_security_group_rules(
                auth_info=auth_info,
                **{
                    'IP Protocol': ('tcp', 'icmp'),
                    'Security Group': default_group
                })
            if len(security_rules) >= 2:
                LOG.info(
                    "Default security group rules for {} already configured "
                    "to allow ping and ssh".format(project))
                continue

            LOG.info(
                "Create icmp and ssh security group rules for {} with best "
                "effort".format(project))
            for rules in (('icmp', None), ('tcp', 22)):
                protocol, dst_port = rules
                network_helper.create_security_group_rule(group=default_group,
                                                          protocol=protocol,
                                                          dst_port=dst_port,
                                                          fail_ok=True,
                                                          auth_info=auth_info)

    return __get_alarms('session')
Example 19
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure the Vxlan provider net is configured only on the internal net
            - Find an internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are placed on separate hosts, enforced with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to an unknown IP and check the compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate was not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure the VMs are not on the same compute; not strictly needed, but double check just in case:
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launched is {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert 0, "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep {}".format(filter_unknown_vtep,
                                                                                  stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
Example 20
def exec_cli(cmd,
             sub_cmd,
             positional_args='',
             client=None,
             flags='',
             fail_ok=False,
             cli_dir='',
             auth_info=None,
             source_openrc=None,
             timeout=CLI_TIMEOUT):
    """

    Args:
        cmd: such as 'neutron'
        sub_cmd: such as 'net-show'
        client: SSHClient, TelnetClient or LocalHostClient
        positional_args: string or list.
            Single arg examples: 'arg0' or ['arg0']
            Multiple args string example: 'arg1 arg2'
            Multiple args list example: ['arg1','arg2']
        flags: string or list.
            Single arg examples: 'arg0 value0' or ['arg0 value']
            Multiple args string example: 'arg1 value1 arg2 value2 arg3'
            Multiple args list example: ['arg1 value1','arg2 value2', 'arg3']
        auth_info: (dict) authorization information to run cli commands.
        source_openrc (None|bool): In general this should NOT be set unless
        necessary.
        fail_ok:
        cli_dir:
        timeout:

    Returns:
        if command executed successfully: return command_output
        if command failed to execute such as authentication failure:
            if fail_ok: return exit_code, command_output
            if not fail_ok: raise exception
    """
    use_telnet = isinstance(client, TelnetClient)

    # Determine region and auth_url
    raw_cmd = cmd.strip().split()[0]
    is_dc = ProjVar.get_var('IS_DC')
    platform_cmds = ('system', 'fm')

    if auth_info is None:
        auth_info = Tenant.get_primary()

    platform = bool(auth_info.get('platform'))

    if not platform and ProjVar.get_var('OPENSTACK_DEPLOYED') is False:
        skip('stx-openstack application is not applied.')

    region = auth_info.get('region')
    dc_region = region if region and is_dc else None
    default_region_and_url = Tenant.get_region_and_url(platform=platform,
                                                       dc_region=dc_region)

    region = region if region else default_region_and_url['region']
    auth_url = auth_info.get('auth_url', default_region_and_url['auth_url'])

    if is_dc:
        # Set proper region when cmd is against DC central cloud. This is
        # needed because the same auth_info may be passed to different
        # keywords that require different regions
        if region in ('RegionOne', 'SystemController'):
            region = 'RegionOne' if raw_cmd in platform_cmds else \
                'SystemController'

        # # Reset auth_url if cmd is against DC central cloud RegionOne
        # containerized services. This is needed due to
        # # the default auth_url for central controller RegionOne is platform
        # auth_url
        # if region == 'RegionOne' and not platform:
        #     auth_url = default_region_and_url['auth_url']

    positional_args = __convert_args(positional_args)
    flags = __convert_args(flags)

    if not use_telnet and not client:
        if is_dc:
            # This may not exist if cli cmd used before DC vars are initialized
            client = ControllerClient.get_active_controller(name=region,
                                                            fail_ok=True)

        if not client:
            client = ControllerClient.get_active_controller()

    if source_openrc is None:
        source_openrc = ProjVar.get_var('SOURCE_OPENRC')

    if source_openrc:
        source_file = _get_rc_path(user=auth_info['user'], platform=platform)
        if use_telnet:
            cmd = 'source {}; {}'.format(source_file, cmd)
        else:
            source_openrc_file(ssh_client=client,
                               auth_info=auth_info,
                               rc_file=source_file,
                               fail_ok=fail_ok)
        flags = ''
    elif auth_info:
        # auth params
        auth_args = (
            "--os-username '{}' --os-password '{}' --os-project-name {} "
            "--os-auth-url {} "
            "--os-user-domain-name Default --os-project-domain-name Default".
            format(auth_info['user'], auth_info['password'],
                   auth_info['tenant'], auth_url))

        flags = '{} {}'.format(auth_args.strip(), flags.strip())

    # internal URL handling
    if raw_cmd in ('openstack', 'sw-manager'):
        flags += ' --os-interface internal'
    else:
        flags += ' --os-endpoint-type internalURL'

    # region handling
    if raw_cmd != 'dcmanager':
        if raw_cmd == 'cinder':
            flags += ' --os_region_name {}'.format(region)
        else:
            flags += ' --os-region-name {}'.format(region)

    complete_cmd = ' '.join(
        [os.path.join(cli_dir, cmd),
         flags.strip(), sub_cmd, positional_args]).strip()

    # workaround for dcmanager cmd not supporting --os-project-name
    if complete_cmd.startswith('dcmanager'):
        complete_cmd = complete_cmd.replace('--os-project-name',
                                            '--os-tenant-name')

    kwargs = {'searchwindowsize': 100} if not use_telnet else {}
    exit_code, cmd_output = client.exec_cmd(complete_cmd,
                                            expect_timeout=timeout,
                                            **kwargs)

    if exit_code == 0:
        return 0, cmd_output

    if fail_ok and exit_code in [1, 2]:
        return 1, cmd_output

    raise exceptions.CLIRejected(
        "CLI '{}' failed to execute. Output: {}".format(
            complete_cmd, cmd_output))
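A hedged usage sketch based on the docstring above; the subcommand and flag are illustrative and assume an initialized ControllerClient session:

# Runs 'openstack server list --long' with auth flags filled in from the
# primary tenant; with fail_ok=True a CLI failure returns (1, output)
code, output = exec_cli('openstack', 'server list', flags='--long', fail_ok=True)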
Example 21
def test_horizon_manage_volume_attachments(instances_pg):
    """
    Test the attach/detach actions for volume:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Instances

    Teardown:
        - Back to Instances page
        - Logout

    Test Steps:
        - Create a new instance
        - Go to Project -> Compute -> Volumes, create volume
        - Attach the volume to the newly created instance
        - Check that volume is In-use and link to instance
        - Detach volume from instance
        - Check volume is Available
        - Delete the volume
        - Delete the instance
    """
    instances_pg, volume_name = instances_pg
    instance_name = helper.gen_resource_name('volume_attachment')

    LOG.tc_step('Create new instance {}'.format(instance_name))
    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flavor_name = nova_helper.get_basic_flavor(rtn_id=False)
    guest_img = GuestImages.DEFAULT['guest']
    instances_pg.create_instance(instance_name,
                                 boot_source_type='Image',
                                 create_new_volume=False,
                                 source_name=guest_img,
                                 flavor_name=flavor_name,
                                 network_names=[mgmt_net_name])
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_active(instance_name)

    LOG.tc_step('Go to Project -> Compute -> Volumes, create volume {}'.format(volume_name))
    volumes_pg = volumespage.VolumesPage(instances_pg.driver, instances_pg.port)
    volumes_pg.go_to_target_page()
    time.sleep(3)
    volumes_pg.create_volume(volume_name)
    assert volumes_pg.is_volume_status(volume_name, 'Available')

    LOG.tc_step('Attach the volume to the newly created instance')
    volumes_pg.attach_volume_to_instance(volume_name, instance_name)

    LOG.tc_step('Check that volume is In-use and link to instance')
    assert volumes_pg.is_volume_status(volume_name, 'In-use')
    assert instance_name in volumes_pg.get_volume_info(volume_name, 'Attached To')

    LOG.tc_step('Detach volume from instance')
    volumes_pg.detach_volume_from_instance(volume_name, instance_name)

    LOG.tc_step('Check volume is Available instead of In-use')
    assert volumes_pg.is_volume_status(volume_name, 'Available')

    LOG.tc_step('Delete the volume {}'.format(volume_name))
    volumes_pg.delete_volume(volume_name)
    assert volumes_pg.is_volume_deleted(volume_name)

    LOG.tc_step('Delete the instance {}'.format(instance_name))
    instances_pg.go_to_target_page()
    instances_pg.delete_instance(instance_name)
    instances_pg.find_message_and_dismiss(messages.SUCCESS)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_deleted(instance_name)
    horizon.test_result = True
Example 22
    def _prepare_test(vm1, vm2, get_hosts, with_router):
        """
        VMs:
            VM1: under test (primary tenant)
            VM2: traffic observer
        """

        vm1_host = vm_helper.get_vm_host(vm1)
        vm2_host = vm_helper.get_vm_host(vm2)
        vm1_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_primary())
        vm2_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_secondary())
        vm1_router_host = network_helper.get_router_host(router=vm1_router)
        vm2_router_host = network_helper.get_router_host(router=vm2_router)
        targets = list(get_hosts)

        if vm1_router_host == vm2_router_host:
            end_time = time.time() + 360
            while time.time() < end_time:
                vm1_router_host = network_helper.get_router_host(
                    router=vm1_router)
                vm2_router_host = network_helper.get_router_host(
                    router=vm2_router)
                if vm1_router_host != vm2_router_host:
                    break
            else:
                assert vm1_router_host != vm2_router_host, "two routers are located on the same compute host"

        if not with_router:
            """
            Setup:
                VM1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER1 on COMPUTE-B
                ROUTER2 on COMPUTE-C
            """
            if len(get_hosts) < 3:
                skip(
                    "Lab not suitable for without_router, requires at least three hypervisors"
                )

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            if vm2_host != vm2_router_host:
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=vm2_router_host)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == vm2_router_host, "live-migration failed"
            host_observer = vm2_host

            LOG.tc_step(
                "Ensure VM1 and (ROUTER1, VM2, ROUTER2) are on different hosts"
            )
            if vm1_router_host in targets:
                # ensure vm1_router_host is not selected for vm1
                # vm1_router_host can be backed by any type of storage
                targets.remove(vm1_router_host)
            if vm2_host in targets:
                targets.remove(vm2_host)

            if vm1_host in targets:
                host_src_evacuation = vm1_host
            else:
                assert targets, "no suitable compute for vm1, after excluding ROUTER1, VM2, ROUTER2 's hosts"
                host_src_evacuation = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=host_src_evacuation)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == host_src_evacuation, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_router_host != vm1_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"
        else:
            """
            Setup:
                VM1, ROUTER1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER2 on COMPUTE-B 
            """
            LOG.tc_step("Ensure VM1, ROUTER1 on COMPUTE-A")

            # VM1 must be sitting on ROUTER1's host, thus vm1_router_host must be backed by local_image
            assert vm1_router_host in targets, "vm1_router_host is not backed by local_image"

            if vm1_host != vm1_router_host:
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=vm1_router_host)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == vm1_router_host, "live-migration failed"
            host_src_evacuation = vm1_host

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            targets.remove(host_src_evacuation)
            if vm2_host in targets:
                host_observer = vm2_host
            else:
                assert targets, "no suitable compute for vm2, after excluding COMPUTE-A"
                host_observer = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=host_observer)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == host_observer, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_host == vm1_router_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"

        assert vm1_host == host_src_evacuation and vm2_host == host_observer, "setup is incorrect"
        LOG.info("Evacuate: VM {} on {}, ROUTER on {}".format(
            vm1, vm1_host, vm1_router_host))
        LOG.info("Observer: VM {} on {}, ROUTER on {}".format(
            vm2, vm2_host, vm2_router_host))

        return host_src_evacuation, host_observer
Example 23
def tenant_home_pg_container(driver, request):
    return __login_base(request=request,
                        driver=driver,
                        auth_info=Tenant.get_primary())
Example 24
def setup_keypair(con_ssh, natbox_client=None):
    """
    copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/
    Args:
        natbox_client (SSHClient): NATBox client
        con_ssh (SSHClient)
    """
    """
    copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/
    Args:
        natbox_client (SSHClient): NATBox client
        con_ssh (SSHClient)
    """
    if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh):
        LOG.info("stx-openstack is not applied. Skip nova keypair config.")
        return

    # ssh private key should now exist under keyfile_path
    if not natbox_client:
        natbox_client = NATBoxClient.get_natbox_client()

    LOG.info("scp key file from controller to NATBox")
    # keyfile path that can be specified in testcase config
    keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH'))

    # keyfile will always be copied to sysadmin home dir first and update file
    # permission
    keyfile_stx_final = os.path.normpath(
        ProjVar.get_var('STX_KEYFILE_SYS_HOME'))
    public_key_stx = '{}.pub'.format(keyfile_stx_final)

    # keyfile will also be saved to /opt/platform as well, so it won't be
    # lost during system upgrade.
    keyfile_opt_pform = '/opt/platform/{}'.format(
        os.path.basename(keyfile_stx_final))

    # copy keyfile to following NatBox location. This can be specified in
    # testcase config
    keyfile_path_natbox = os.path.normpath(
        ProjVar.get_var('NATBOX_KEYFILE_PATH'))

    auth_info = Tenant.get_primary()
    keypair_name = auth_info.get('nova_keypair',
                                 'keypair-{}'.format(auth_info['user']))
    nova_keypair = nova_helper.get_keypairs(name=keypair_name,
                                            auth_info=auth_info)

    linux_user = HostLinuxUser.get_user()
    nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user)
    if not con_ssh.file_exists(keyfile_stx_final):
        with host_helper.ssh_to_host('controller-0',
                                     con_ssh=con_ssh) as con_0_ssh:
            if not con_0_ssh.file_exists(keyfile_opt_pform):
                if con_0_ssh.file_exists(keyfile_stx_origin):
                    # Given private key file exists. Need to ensure public
                    # key exists in same dir.
                    if not con_0_ssh.file_exists('{}.pub'.format(
                            keyfile_stx_origin)) and not nova_keypair:
                        raise FileNotFoundError(
                            '{}.pub is not found'.format(keyfile_stx_origin))
                else:
                    # Need to generate ssh key
                    if nova_keypair:
                        raise FileNotFoundError(
                            "Cannot find private key for existing nova "
                            "keypair {}".format(nova_keypair))

                    con_0_ssh.exec_cmd(
                        "ssh-keygen -f '{}' -t rsa -N ''".format(
                            keyfile_stx_origin),
                        fail_ok=False)
                    if not con_0_ssh.file_exists(keyfile_stx_origin):
                        raise FileNotFoundError(
                            "{} not found after ssh-keygen".format(
                                keyfile_stx_origin))

                # keyfile_stx_origin and matching public key should now exist
                # on controller-0
                # copy keyfiles to home dir and opt platform dir
                con_0_ssh.exec_cmd('cp {} {}'.format(keyfile_stx_origin,
                                                     keyfile_stx_final),
                                   fail_ok=False)
                con_0_ssh.exec_cmd('cp {}.pub {}'.format(
                    keyfile_stx_origin, public_key_stx),
                                   fail_ok=False)
                con_0_ssh.exec_sudo_cmd('cp {} {}'.format(
                    keyfile_stx_final, keyfile_opt_pform),
                                        fail_ok=False)

            # Make sure owner is sysadmin
            # If private key exists in opt platform, then it must also exist
            # in home dir
            con_0_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, keyfile_stx_final),
                                    fail_ok=False)

        # ssh private key should now exists under home dir and opt platform
        # on controller-0
        if con_ssh.get_hostname() != 'controller-0':
            # copy file from controller-0 home dir to controller-1
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=keyfile_stx_final,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=keyfile_stx_final,
                                timeout=60)

    if not nova_keypair:
        LOG.info("Create nova keypair {} using public key {}".format(
            nova_keypair, public_key_stx))
        if not con_ssh.file_exists(public_key_stx):
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=public_key_stx,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=public_key_stx,
                                timeout=60)
            con_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, public_key_stx),
                                  fail_ok=False)

        if ProjVar.get_var('REMOTE_CLI'):
            dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                     os.path.basename(public_key_stx))
            common.scp_from_active_controller_to_localhost(
                source_path=public_key_stx, dest_path=dest_path, timeout=60)
            public_key_stx = dest_path
            LOG.info("Public key file copied to localhost: {}".format(
                public_key_stx))

        nova_helper.create_keypair(keypair_name,
                                   public_key=public_key_stx,
                                   auth_info=auth_info)

    natbox_client.exec_cmd('mkdir -p {}'.format(
        os.path.dirname(keyfile_path_natbox)))
    tis_ip = ProjVar.get_var('LAB').get('floating ip')
    for i in range(10):
        try:
            natbox_client.scp_on_dest(source_ip=tis_ip,
                                      source_user=HostLinuxUser.get_user(),
                                      source_pswd=HostLinuxUser.get_password(),
                                      source_path=keyfile_stx_final,
                                      dest_path=keyfile_path_natbox,
                                      timeout=120)
            LOG.info("private key is copied to NatBox: {}".format(
                keyfile_path_natbox))
            break
        except exceptions.SSHException as e:
            if i == 9:
                raise

            LOG.info(e.__str__())
            time.sleep(10)
Example 25
def add_or_remove_role(add_=True, role='admin', project=None, user=None,
                       domain=None, group=None, group_domain=None,
                       project_domain=None, user_domain=None, inherited=None,
                       check_first=True, fail_ok=False,
                       con_ssh=None, auth_info=Tenant.get('admin')):
    """
    Add or remove given role for specified user and tenant. e.g., add admin
    role to tenant2 user on tenant2 project

    Args:
        add_(bool): whether to add or remove
        role (str): an existing role from openstack role list
        project (str): tenant name. When unset, the primary tenant name
            will be used
        user (str): an existing user that belongs to given tenant
        domain (str): Include <domain> (name or ID)
        group (str): Include <group> (name or ID)
        group_domain (str): Domain the group belongs to (name or ID).
            This can be used in case collisions between group names exist.
        project_domain (str): Domain the project belongs to (name or ID).
            This can be used in case collisions between project names exist.
        user_domain (str): Domain the user belongs to (name or ID).
            This can be used in case collisions between user names exist.
        inherited (bool): Specifies if the role grant is inheritable to the
            sub projects
        check_first (bool): whether to check if role already exists for given
            user and tenant
        fail_ok (bool): whether to throw exception on failure
        con_ssh (SSHClient): active controller ssh session
        auth_info (dict): auth info to use to executing the add role cli

    Returns (tuple):

    """
    tenant_dict = {}

    if project is None:
        if auth_info and auth_info.get('platform'):
            project = auth_info['tenant']
        else:
            tenant_dict = Tenant.get_primary()
            project = tenant_dict['tenant']

    if user is None:
        user = tenant_dict.get('user', project)

    if check_first:
        existing_roles = get_role_assignments(role=role, project=project,
                                              user=user,
                                              user_domain=user_domain,
                                              group=group,
                                              group_domain=group_domain,
                                              domain=domain,
                                              project_domain=project_domain,
                                              inherited=inherited,
                                              effective_only=False,
                                              con_ssh=con_ssh,
                                              auth_info=auth_info)
        if existing_roles:
            if add_:
                msg = "Role already exists with given criteria: {}".format(
                    existing_roles)
                LOG.info(msg)
                return -1, msg
        else:
            if not add_:
                msg = "Role with given criteria does not exist. Do nothing."
                LOG.info(msg)
                return -1, msg

    msg_str = 'Add' if add_ else 'Remov'
    LOG.info(
        "{}ing {} role to {} user under {} project".format(msg_str, role, user,
                                                           project))

    sub_cmd = "--user {} --project {}".format(user, project)
    if inherited is True:
        sub_cmd += ' --inherited'

    optional_args = {
        'domain': domain,
        'group': group,
        'group-domain': group_domain,
        'project-domain': project_domain,
        'user-domain': user_domain,
    }

    for key, val in optional_args.items():
        if val is not None:
            sub_cmd += ' --{} {}'.format(key, val)

    sub_cmd += ' {}'.format(role)

    cmd = 'role add' if add_ else 'role remove'
    res, out = cli.openstack(cmd, sub_cmd, ssh_client=con_ssh, fail_ok=fail_ok,
                             auth_info=auth_info)

    if res == 1:
        return 1, out

    LOG.info("{} cli accepted. Check role is {}ed "
             "successfully".format(cmd, msg_str))
    post_roles = get_role_assignments(role=role, project=project, user=user,
                                      user_domain=user_domain, group=group,
                                      group_domain=group_domain, domain=domain,
                                      project_domain=project_domain,
                                      inherited=inherited, effective_only=True,
                                      con_ssh=con_ssh, auth_info=auth_info)

    err_msg = ''
    if add_ and not post_roles:
        err_msg = "No role is added with given criteria"
    elif post_roles and not add_:
        err_msg = "Role is not removed"
    if err_msg:
        if fail_ok:
            LOG.warning(err_msg)
            return 2, err_msg
        else:
            raise exceptions.KeystoneError(err_msg)

    succ_msg = "Role is successfully {}ed".format(msg_str)
    LOG.info(succ_msg)
    return 0, succ_msg
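A hedged usage sketch mirroring the add_admin_role fixture in Example 16; the admin role is granted to the primary tenant's own user and later reverted:

auth = Tenant.get_primary()
code, msg = add_or_remove_role(add_=True, role='admin', user=auth.get('user'),
                               project=auth.get('tenant'))
# revert when done (a -1 code above means the role already existed)
add_or_remove_role(add_=False, role='admin', user=auth.get('user'),
                   project=auth.get('tenant'))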
Example 26
def test_horizon_floating_ip_associate_disassociate(instances_pg):
    """
    Tests the floating-ip allocate/release functionality:

    Setups:
        - Login as Tenant
        - Go to Project > Compute > Instances

    Teardown:
        - Back to Instances page
        - Logout

    Test Steps:
        - Create a new instance
        - Allocates floating ip
        - Associate floating ip to the instance and verify it
        - Disassociate floating ip from the instance and verify it
        - Release Floating ip
        - Delete the instance
    """
    instance_name = helper.gen_resource_name('instance')
    LOG.tc_step('Create new instance {}'.format(instance_name))
    mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net'])
    flv_name = nova_helper.get_basic_flavor(rtn_id=False)
    guest_img = GuestImages.DEFAULT['guest']

    instances_pg.create_instance(instance_name,
                                 boot_source_type='Image',
                                 create_new_volume=False,
                                 source_name=guest_img,
                                 flavor_name=flv_name,
                                 network_names=[mgmt_net_name])
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_active(instance_name)

    instance_ipv4 = instances_pg.get_fixed_ipv4(instance_name)
    instance_info = "{} {}".format(instance_name, instance_ipv4)

    floating_ips_page = project_floatingipspage.FloatingipsPage(instances_pg.driver, port=instances_pg.port)
    floating_ips_page.go_to_target_page()

    LOG.tc_step('Allocates floating ip')
    floating_ip = floating_ips_page.allocate_floatingip()
    assert floating_ips_page.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_page.find_message_and_dismiss(messages.ERROR)
    assert floating_ips_page.is_floatingip_present(floating_ip)

    assert '-' == floating_ips_page.get_floatingip_info(floating_ip, 'Mapped Fixed IP Address')

    LOG.tc_step('Associate floating ip to {} and verify'.format(instance_name))
    floating_ips_page.associate_floatingip(floating_ip, instance_name, instance_ipv4)
    assert floating_ips_page.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_page.find_message_and_dismiss(messages.ERROR)
    assert instance_info == floating_ips_page.get_floatingip_info(floating_ip, 'Mapped Fixed IP Address')

    LOG.tc_step('Disassociate floating ip from {} and verify'.format(instance_name))
    floating_ips_page.disassociate_floatingip(floating_ip)
    assert floating_ips_page.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_page.find_message_and_dismiss(messages.ERROR)
    assert '-' == floating_ips_page.get_floatingip_info(floating_ip, 'Mapped Fixed IP Address')

    LOG.tc_step('Release Floating ip')
    floating_ips_page.release_floatingip(floating_ip)
    assert floating_ips_page.find_message_and_dismiss(messages.SUCCESS)
    assert not floating_ips_page.find_message_and_dismiss(messages.ERROR)
    assert not floating_ips_page.is_floatingip_present(floating_ip)

    LOG.tc_step('Delete instance {}'.format(instance_name))
    instances_pg.go_to_target_page()
    instances_pg.delete_instance(instance_name)
    assert instances_pg.find_message_and_dismiss(messages.INFO)
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_deleted(instance_name)
    horizon.test_result = True