Example n. 1
def test_clis():
    print(CliAuth.get_var('HTTPS'))
    cli.system('host-list')
    cli.system('host-show controller-0')
    cli.openstack('server list')
    cli.openstack('stack list')
    ceilometer_helper.get_alarms()
    keystone_helper.get_endpoints()
    cli.openstack('router list')
    cli.openstack('volume list')
    cli.openstack('image list')
Example n. 2
def set_region(region=None):
    """
    Set the global region variable.
    This needs to be called after CliAuth.set_vars, since the custom region
    value needs to override what is specified in the openrc file.

    The local region and auth url are saved in CliAuth, while the remote
    region and auth url are saved in Tenant.

    Args:
        region: region to set

    """
    local_region = CliAuth.get_var('OS_REGION_NAME')
    if not region:
        if ProjVar.get_var('IS_DC'):
            region = 'SystemController'
        else:
            region = local_region
    Tenant.set_region(region=region)
    ProjVar.set_var(REGION=region)
    if re.search(SUBCLOUD_PATTERN, region):
        # Distributed cloud, lab specified is a subcloud.
        urls = keystone_helper.get_endpoints(region=region,
                                             field='URL',
                                             interface='internal',
                                             service_name='keystone')
        if not urls:
            raise ValueError(
                "No internal endpoint found for region {}. Invalid value for "
                "--region with specified lab."
                "sub-cloud tests can be run on controller, but not the other "
                "way round".format(region))
        Tenant.set_platform_url(urls[0])
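The subcloud branch above hinges on SUBCLOUD_PATTERN matching the region name; a minimal stand-alone sketch of that check (the pattern string below is an assumption, not the project's actual constant) could look like:

import re

SUBCLOUD_PATTERN = r'subcloud'  # assumed pattern; the real constant comes from the project's consts module

for region_name in ('RegionOne', 'SystemController', 'subcloud-2'):
    if re.search(SUBCLOUD_PATTERN, region_name):
        print('{}: treat as subcloud, look up its keystone endpoint'.format(region_name))
    else:
        print('{}: use the local/central region directly'.format(region_name))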
Example n. 3
def verify_swift_object_setup():

    LOG.info("Verifying  swift endpoints...")
    port = '7480'
    endpoints_url = keystone_helper.get_endpoints(field='URL',
                                                  service_name='swift',
                                                  interface='public')[0]
    LOG.info("Swift  public endpoint url: {}".format(endpoints_url))
    url_port = endpoints_url.split(':')[2].split('/')[0].strip()
    if url_port != port:
        LOG.warning(
            "Swift endpoint  use unexpected port {}. Expected port is {}.".
            format(url_port, port))
        return False

    LOG.info("Verifying if swift object pools are setup...")

    if 'ceph' in storage_helper.get_storage_backends():
        con_ssh = ControllerClient.get_active_controller()
        cmd = "rados df | awk 'NR>1 && NR < 11 {{print $1}}'"
        rc, output = con_ssh.exec_cmd(cmd, fail_ok=True)
        LOG.info("Swift object pools:{}".format(output))

        if rc == 0:
            pools = output.split('\n')
            if set(SWIFT_POOLS).issubset(pools):
                LOG.info(
                    "Swift object pools: {}  are set...".format(SWIFT_POOLS))
            else:
                LOG.info("Expected Swift object pools: {}"
                         " are NOT set. Pools = {}".format(SWIFT_POOLS, pools))
                return False
        else:
            return False

    LOG.info(
        "Verifying if swift object service (ceph-radosgw) is listed via 'sudo sm-dump' on the "
        "active controller...")
    cmd = "sm-dump | grep ceph-radosgw | awk ' {print $1\" \" $2\" \" $3}'"
    con_ssh = ControllerClient.get_active_controller()
    rc, output = con_ssh.exec_sudo_cmd(cmd, fail_ok=True)

    if rc == 0 and "ceph-radosgw enabled-active enabled-active" in output:
        LOG.info(
            "swift object service (ceph-radosgw) is listed via 'sudo sm-dump' on the active controller..."
        )
    else:
        LOG.warning(
            " Unable to verify Swift object service ceph-radosgw: {}.".format(
                output))
        return False
    return True
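The port check above extracts the port by splitting the endpoint string; a more robust, self-contained sketch using urllib.parse (the endpoint URL below is illustrative) would be:

from urllib.parse import urlsplit

def get_endpoint_port(endpoint_url):
    # urlsplit parses scheme://host:port/path and exposes the port as an int (or None if absent)
    return urlsplit(endpoint_url).port

# illustrative swift public endpoint
print(get_endpoint_port('http://192.168.204.2:7480/swift/v1'))  # 7480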
Example n. 4
    def __init__(self, serviceName, platform=False):
        """
        Initialize an object for handling REST calls.
        Args:
            serviceName: name of the service whose public endpoint is used
                as the base URL

        """
        auth_info = Tenant.get('admin_platform') if platform else Tenant.get(
            'admin')
        self.token = ""
        self.token_payload = ""
        self.region = ProjVar.get_var('REGION')
        self.baseURL = keystone_helper.get_endpoints(field='URL',
                                                     service_name=serviceName,
                                                     interface="public",
                                                     region=self.region,
                                                     auth_info=auth_info)[0]
        self.ksURL = keystone_helper.get_endpoints(field='URL',
                                                   service_name='keystone',
                                                   interface="public",
                                                   region=self.region,
                                                   auth_info=auth_info)[0]
        self.cert_path = None
        self.verify = True
        self.is_https = CliAuth.get_var('HTTPS')
        if self.is_https:
            self.verify = False
            cert_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                     'server-with-key.pem')
            if not os.path.exists(cert_path):
                cert_path = security_helper.fetch_cert_file(scp_to_local=True)
            self.cert_path = cert_path
            if cert_path:
                self.verify = cert_path

        self.generate_token_request()
        self.retrieve_token('/auth/tokens')
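A note on the verify handling above: requests accepts either a boolean or a path to a CA bundle for its verify argument, which is why cert_path can be assigned to self.verify directly. A minimal sketch (URL and bundle path are illustrative):

import requests

verify = '/tmp/server-with-key.pem'  # illustrative CA bundle path; False would skip verification
resp = requests.get('https://10.10.10.2:5000/v3', verify=verify, timeout=30)
print(resp.status_code)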
Example n. 5
def get_swift_public_url(con_ssh=None, auth_info=Tenant.get('admin')):
    endpoints_url = keystone_helper.get_endpoints(field='URL', service_name='swift', interface='public',
                                                  con_ssh=con_ssh, auth_info=auth_info)
    LOG.info("Swift endpoints URL: {}".format(endpoints_url))
    return endpoints_url[0]
Example n. 6
def check_services_access(service_name=None,
                          region=None,
                          auth=True,
                          verify=True,
                          use_dnsname=True,
                          auth_info=Tenant.get('admin_platform')):
    """
    Check public endpoints of services are reachable via get request
    Args:
        service_name(str|list|None): filter only certain services to check
        region(str|None): filter only the endpoints from a certain region
        auth(bool): perform the requests with an authentication token from keystone
        verify(bool|str):
            True: if https is enabled, verify the cert with the default CA
            False: equivalent to --insecure in curl cmd
            str: applies to https system. CA-Certificate path. e.g., verify=/path/to/cert
        use_dnsname(bool): True to use the DNS name instead of the IP for the rest request
        auth_info(dict):

    Returns(None):

    """
    if not use_dnsname:
        verify = False
    LOG.info('Check services access via curl')
    token = None
    if auth:
        token = get_auth_token(region=region,
                               auth_info=auth_info,
                               use_dnsname=use_dnsname)
    headers = {'X-Auth-Token': token} if token else None

    if service_name:
        urls_to_check = []
        if isinstance(service_name, str):
            service_name = [service_name]
        for service in service_name:
            url = keystone_helper.get_endpoints(field='URL',
                                                interface='public',
                                                region=region,
                                                enabled='True',
                                                service_name=service,
                                                auth_info=auth_info)
            if url:
                urls_to_check.extend(url)
            else:
                LOG.warning(
                    "{} service's public endpoint not found or not "
                    "enabled".format(service))
    else:
        urls_to_check = keystone_helper.get_endpoints(field='URL',
                                                      interface='public',
                                                      region=region,
                                                      enabled='True',
                                                      auth_info=auth_info)
    if use_dnsname:
        lab_ip = common.get_lab_fip(region=region)
        lab_dns_name = common.get_dnsname(region=region)
        urls_to_check = [
            url.replace(lab_ip, lab_dns_name) for url in urls_to_check
        ]

    for url in urls_to_check:
        # FIXME skip unreachable port 7777 (sm-api) until CGTS-19988 is resolved
        # FIXME skip unreachable port 8219 (dcdbsync) until 1892391 is resolved
        if url.endswith('7777') or url.endswith('8219/v1.0'):
            continue
        check_url_access(url=url, headers=headers, verify=verify)
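check_url_access is not shown in this listing; a minimal sketch of what such a helper might do with requests (the function body is an assumption, not the project's implementation) is:

import requests

def check_url_access(url, headers=None, verify=True, timeout=30):
    # a plain GET is enough to show the public endpoint is reachable;
    # raise_for_status is skipped on purpose since a 401/403 still proves connectivity
    resp = requests.get(url, headers=headers, verify=verify, timeout=timeout)
    print('{} -> {}'.format(url, resp.status_code))
    return resp.status_code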
Example n. 7
def get_auth_token(region=None,
                   auth_info=Tenant.get('admin_platform'),
                   use_dnsname=True):
    """
    Get an authentication token from keystone
    Args:
        region(str): the cloud region from which to get the keystone token
        auth_info:
        use_dnsname(bool): True to use the DNS name instead of the IP for the rest request

    Returns(str|None): Authentication token

    """
    keystone_endpoint = keystone_helper.get_endpoints(field='URL',
                                                      service_name='keystone',
                                                      interface="public",
                                                      region=region,
                                                      auth_info=auth_info)[0]
    keystone_url = '{}/{}'.format(keystone_endpoint, 'auth/tokens')
    if use_dnsname:
        lab_ip = common.get_lab_fip(region=region)
        lab_dns_name = common.get_dnsname(region=region)
        keystone_url = keystone_url.replace(lab_ip, lab_dns_name)
    LOG.info(
        'Get authentication token from keystone url {}'.format(keystone_url))
    headers = {'Content-type': 'application/json'}
    body = {
        'auth': {
            'identity': {
                'methods': ['password'],
                'password': {
                    'user': {
                        'domain': {
                            'name': 'Default'
                        },
                        'name': 'admin',
                        'password': '******'
                    }
                }
            },
            'scope': {
                'project': {
                    'name': 'admin',
                    'domain': {
                        'name': 'Default'
                    }
                }
            }
        }
    }
    try:
        req = requests.post(url=keystone_url,
                            headers=headers,
                            data=json.dumps(body),
                            verify=False)
    except Exception as e:
        LOG.error('Error trying to get a token')
        LOG.debug(e)
        return None
    LOG.debug(
        '\n{} {}\nHeaders: {}\nBody: {}\nResponse code: {}\nResponse body: {}'.
        format(req.request.method, req.request.url, req.request.headers,
               req.request.body, req.status_code, req.text))
    LOG.info('Status: [{}]'.format(req.status_code))
    req.raise_for_status()
    return req.headers.get('X-Subject-Token')
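The token returned above is normally attached as an X-Auth-Token header on later REST calls, as Example n. 6 does; a minimal usage sketch (the region value is illustrative):

token = get_auth_token(region='RegionOne')
headers = {'X-Auth-Token': token} if token else None
# pass `headers` to requests.get() against any public service endpoint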
Example n. 8
def refstack_setup(refstack_pre_check, request):

    LOG.fixture_step("Enable Swift if not already done")
    storage_helper.modify_swift(enable=True)

    LOG.fixture_step("Create tenants, users, and update quotas")
    compliance_helper.create_tenants_and_update_quotas(add_swift_role=True)

    LOG.fixture_step("Create test flavors")
    flavors = []
    for i in range(2):
        flavor_id = nova_helper.create_flavor(name='refstack', vcpus=2, ram=2048, root_disk=2, cleanup='session')[1]
        nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated',
                                             FlavorSpec.MEM_PAGE_SIZE: 2048})
        flavors.append(flavor_id)

    LOG.fixture_step("Get/create test images")
    images = [glance_helper.get_image_id_from_name()]
    image_id = glance_helper.create_image()[1]
    images.append(image_id)
    ResourceCleanup.add('image', image_id, scope='session')

    LOG.fixture_step("Setup public router if not already done.")
    external_net_id = network_helper.get_networks(external=True)[0]
    public_router = 'public-router0'
    pub_routers = network_helper.get_routers(name=public_router, auth_info=Tenant.get('admin'))
    if not pub_routers:
        LOG.info("Create public router and add interfaces")
        public_router_id = network_helper.create_router(name=public_router, project=Tenant.get('admin')['tenant'])[1]
        network_helper.set_router_gateway(router_id=public_router_id, external_net=external_net_id)

        internal_subnet = 'internal0-subnet0-1'
        gateway = '10.1.1.1'
        network_helper.set_subnet(subnet=internal_subnet, gateway=gateway)
        network_helper.add_router_interface(router=public_router_id, subnet=internal_subnet,
                                            auth_info=Tenant.get('admin'))

    keystone_pub = keystone_helper.get_endpoints(field='URL', interface='public', service_name='keystone')[0]
    keystone_pub_url = keystone_pub.split('/v')[0] + '/'
    keystone_pub_url = keystone_pub_url.replace(':', r'\:').replace('/', r'\/')

    params_dict = {
        'image_ref': images[0],
        'image_ref_alt': images[1],
        'flavor_ref': flavors[0],
        'flavor_ref_alt': flavors[1],
        'public_network_id': external_net_id,
        'uri': keystone_pub_url + 'v2.0',
        'uri_v3': keystone_pub_url + 'v3',
        'discoverable_apis': 'tempurl,container_quotas',
        'container_sync': 'false',
        'object_versioning': 'true',
        'discoverability': 'false',
    }

    LOG.fixture_step("Update tempest.conf parameters on cumulus server: \n{}".format(params_dict))
    with compliance_helper.ssh_to_compliance_server() as server_ssh:
        for key, val in params_dict.items():
            server_ssh.exec_cmd('sed -i "s/^{} =.*/{} = {}/g" {}'.format(key, key, val, RefStack.TEMPEST_CONF),
                                fail_ok=False)
            server_ssh.exec_cmd('grep {} {}'.format(val, RefStack.TEMPEST_CONF), fail_ok=False)

        compliance_helper.add_route_for_vm_access(server_ssh)

    def scp_logs():
        LOG.info("scp test results files from refstack test host to local automation dir")
        dest_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'compliance')
        os.makedirs(dest_dir, exist_ok=True)
        localhost = LocalHostClient()
        localhost.connect()

        for item in RefStack.LOG_FILES:
            source_path = '{}/{}'.format(RefStack.TEST_HISTORY_DIR, item)
            localhost.scp_on_dest(source_ip=ComplianceCreds.get_host(), source_user=ComplianceCreds.get_user(),
                                  source_pswd=ComplianceCreds.get_password(), source_path=source_path,
                                  dest_path=dest_dir, timeout=300, cleanup=False)

        origin_name = ComplianceVar.get_var('REFSTACK_SUITE').rsplit(r'/', maxsplit=1)[-1]
        localhost.exec_cmd('mv {}/test-list.txt {}/{}'.format(dest_dir, dest_dir, origin_name))
    request.addfinalizer(scp_logs)
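The keystone URL above is escaped so it can be embedded in a sed s/.../.../ replacement; a stand-alone sketch of that escaping (the URL is illustrative):

keystone_pub_url = 'http://192.168.204.2:5000/'  # illustrative endpoint
escaped = keystone_pub_url.replace(':', r'\:').replace('/', r'\/')
print(escaped)  # http\:\/\/192.168.204.2\:5000\/ -- safe inside a sed replacement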
Example n. 9
def configure_dovetail_server(hosts_per_personality):
    """
    - Update env_config.sh on dovetail test node
    - Update tempest_conf.yaml min_compute_nodes count
    - Update nova-api process count in docker overlay monitor_process.py
    - Update monitor.py
    - Create pod.yaml file on localhost and scp to dovetail test node

    Args:
        hosts_per_personality:

    """
    con_ssh = ControllerClient.get_active_controller()
    # # Do not modify the tool
    # nova_proc_count = int(con_ssh.exec_cmd('ps -fC nova-api | grep nova | wc -l')[1])
    # assert nova_proc_count > 0, "0 nova-api processes running on active controller"

    LOG.fixture_step("Update {} on dovetail test node".format(Dovetail.ENV_SH))
    admin_dict = Tenant.get('admin')
    tenant_name = admin_dict['tenant']
    keystone_public_url = keystone_helper.get_endpoints(
        service_name='keystone',
        interface='public',
        region=admin_dict['region'],
        field='url')[0]
    env_conf_dict = {
        'OS_PROJECT_NAME':
        tenant_name,
        'OS_PROJECT_ID':
        keystone_helper.get_projects(field='ID', name=tenant_name)[0],
        'OS_TENANT_NAME':
        tenant_name,
        'OS_USERNAME':
        admin_dict['user'],
        'OS_PASSWORD':
        admin_dict['password'],
        'OS_AUTH_URL':
        keystone_public_url.replace(':', r'\:').replace(r'/', r'\/'),
        'OS_IDENTITY_API_VERSION':
        CliAuth.get_var('OS_IDENTITY_API_VERSION'),
    }

    Dovetail.set_auth_url(keystone_public_url)
    ComplianceCreds.set_host(Dovetail.TEST_NODE)
    ComplianceCreds.set_user(Dovetail.USERNAME)
    ComplianceCreds.set_password(Dovetail.PASSWORD)
    with compliance_helper.ssh_to_compliance_server() as compliance_ssh:
        env_path = Dovetail.ENV_SH
        for var, value in env_conf_dict.items():
            compliance_ssh.exec_cmd(
                'sed -i "s/^export {}=.*/export {}={}/g" {}'.format(
                    var, var, value, env_path))
            compliance_ssh.exec_cmd('grep "export {}={}" {}'.format(
                var, value, env_path),
                                    fail_ok=False)

        LOG.fixture_step("Update tempest_conf.yaml min_compute_nodes count")
        compliance_ssh.exec_sudo_cmd(
            'sed -i "s/^  min_compute_nodes:.*/  min_compute_nodes: {}/g" {}'.
            format(len(hosts_per_personality['compute']),
                   Dovetail.TEMPEST_YAML))

        # # Do not modify the tool
        # LOG.fixture_step("Update nova-api process count in docker overlay monitor_process.py")
        # file_path = compliance_ssh.exec_sudo_cmd("find / -name monitor_process.py")[1]
        # LOG.fixture_step('Fixing monitor.py located at {}'.format(file_path))
        # compliance_ssh.exec_sudo_cmd("sed -ie 's/processes=.*/processes={}/g' {}".format(nova_proc_count, file_path))

        compliance_helper.add_route_for_vm_access(compliance_ssh)

    LOG.fixture_step(
        "Collect hosts info, create pod.yaml file on localhost and scp to dovetail test node"
    )
    import yaml
    yaml_nodes = []
    controllers = hosts_per_personality['controller']
    computes = hosts_per_personality['compute']

    node_count = 1
    for host in controllers:
        node_ip = con_ssh.exec_cmd(
            'nslookup {} | grep -A 2 "Name:" | grep --color=never "Address:"'.
            format(host),
            fail_ok=False)[1].split('Address:')[1].strip()
        yaml_nodes.append({
            'name': 'node{}'.format(node_count),
            'role': 'Controller',
            'ip': node_ip,
            'user': '******',
            'password': HostLinuxUser.get_password()
        })
        node_count += 1

    for compute in computes:
        node_ip = con_ssh.exec_cmd(
            'nslookup {} | grep -A 2 "Name:" | grep --color=never "Address:"'.
            format(compute),
            fail_ok=False)[1].split('Address:')[1].strip()
        yaml_nodes.append({
            'name': 'node{}'.format(node_count),
            'role': 'Compute',
            'ip': node_ip,
            'user': '******',
            'password': HostLinuxUser.get_password()
        })
        node_count += 1

    pod_yaml_dict = {'nodes': yaml_nodes}
    local_path = '{}/pod.yaml'.format(ProjVar.get_var('TEMP_DIR'))
    with open(local_path, 'w') as f:
        yaml.dump(pod_yaml_dict, f, default_flow_style=False)

    common.scp_from_local(source_path=local_path,
                          dest_path=Dovetail.POD_YAML,
                          dest_ip=Dovetail.TEST_NODE,
                          dest_user=Dovetail.USERNAME,
                          dest_password=Dovetail.PASSWORD,
                          timeout=30)