def pytest_collectstart():
    """
    Set up the ssh session at collectstart, because the skipif condition is
    evaluated during the test collection phase.
    """
    global con_ssh
    con_ssh = setups.setup_tis_ssh(InstallVars.get_install_var("LAB"))
    InstallVars.set_install_var(con_ssh=con_ssh)
    auth = setups.get_auth_via_openrc(con_ssh)
    if auth:
        # Reuse the auth info already fetched instead of sourcing openrc again
        CliAuth.set_vars(**auth)
        Tenant.set_platform_url(CliAuth.get_var('OS_AUTH_URL'))
        Tenant.set_region(CliAuth.get_var('OS_REGION_NAME'))
def pytest_collectstart():
    """
    Set up the ssh session at collectstart, because the skipif condition is
    evaluated during the test collection phase.
    """
    global con_ssh
    lab = ProjVar.get_var("LAB")
    if 'vbox' in lab['short_name']:
        con_ssh = setups.setup_vbox_tis_ssh(lab)
    else:
        con_ssh = setups.setup_tis_ssh(lab)
    ProjVar.set_var(con_ssh=con_ssh)
    CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
    Tenant.set_region(region=CliAuth.get_var('OS_REGION_NAME'))
    # Pass the resolved auth url rather than the literal variable name
    Tenant.set_platform_url(url=CliAuth.get_var('OS_AUTH_URL'))
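# A minimal sketch of why the hooks above populate the auth vars so early:
# skipif conditions are evaluated while pytest collects (imports) the test
# modules, before any session fixtures run, so CliAuth must already hold the
# lab's settings at that point. The test below is illustrative only and is
# not part of the snippets in this section.
import pytest
from consts.auth import CliAuth


@pytest.mark.skipif(not CliAuth.get_var('HTTPS'),
                    reason='Test only applies to https labs')
def test_https_only_example():
    pass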
def set_region(region=None):
    """
    Set the global region variable.

    This needs to be called after CliAuth.set_vars, since a custom region
    value needs to override what is specified in the openrc file.

    The local region and auth url are saved in CliAuth, while the remote
    region and auth url are saved in Tenant.

    Args:
        region: region to set
    """
    local_region = CliAuth.get_var('OS_REGION_NAME')
    if not region:
        if ProjVar.get_var('IS_DC'):
            region = 'SystemController'
        else:
            region = local_region
    Tenant.set_region(region=region)
    ProjVar.set_var(REGION=region)

    if re.search(SUBCLOUD_PATTERN, region):
        # Distributed cloud, and the lab specified is a subcloud
        urls = keystone_helper.get_endpoints(region=region, field='URL',
                                             interface='internal',
                                             service_name='keystone')
        if not urls:
            raise ValueError(
                "No internal endpoint found for region {}. Invalid value for "
                "--region with the specified lab. Sub-cloud tests can be run "
                "on controller, but not the other way round".format(region))
        Tenant.set_platform_url(urls[0])
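# A minimal usage sketch of set_region above; 'subcloud1' is an assumed
# region name expected to match SUBCLOUD_PATTERN, not a value taken from the
# snippets in this section.
set_region(region=None)         # DC lab -> 'SystemController', otherwise the
                                # OS_REGION_NAME from the openrc file
set_region(region='subcloud1')  # also repoints the platform url to that
                                # subcloud's internal keystone endpoint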
def base_url(self):
    from consts.auth import CliAuth
    if CliAuth.get_var('HTTPS'):
        prefix = 'https'
        lab_name = ProjVar.get_var('LAB').get('name')
        if not lab_name:
            skip('Skip https testing on unknown lab')
        domain = '{}.cumulus.wrs.com'.format(
            lab_name.split('yow-')[-1].replace('_', '-'))
        if self.port and self.port == 31000:
            domain = ProjVar.get_var('OPENSTACK_DOMAIN')
            if not domain:
                skip('OpenStack endpoint domain not found in service '
                     'parameters. Skip OpenStack horizon test with https.')
    else:
        prefix = 'http'
        domain = ProjVar.get_var("LAB")['floating ip']
        if ProjVar.get_var('IPV6_OAM'):
            domain = '[{}]'.format(domain)

    if not self.port:
        self.port = 8080 if prefix == 'http' else 8443
    base_url = '{}://{}:{}'.format(prefix, domain, self.port)  # horizon url
    if not base_url.endswith('/'):
        base_url += '/'
    return base_url
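# A minimal sketch of the https domain derivation in base_url above; the lab
# name is an assumption used only to illustrate the transform.
lab_name = 'yow-cgcs-wildcat-80_84'
domain = '{}.cumulus.wrs.com'.format(
    lab_name.split('yow-')[-1].replace('_', '-'))
assert domain == 'cgcs-wildcat-80-84.cumulus.wrs.com'
print('https://{}:{}/'.format(domain, 8443))  # default https Horizon port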
def test_clis():
    print(CliAuth.get_var('HTTPS'))
    cli.system('host-list')
    cli.system('host-show controller-0')
    cli.openstack('server list')
    cli.openstack('stack list')
    ceilometer_helper.get_alarms()
    keystone_helper.get_endpoints()
    cli.openstack('router list')
    cli.openstack('volume list')
    cli.openstack('image list')
def pytest_collectstart():
    """
    Set up the ssh session at collectstart, because the skipif condition is
    evaluated during the test collection phase.
    """
    global initialized
    if not initialized:
        global con_ssh
        con_ssh = setups.setup_tis_ssh(ProjVar.get_var("LAB"))
        ProjVar.set_var(con_ssh=con_ssh)
        CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
        if setups.is_https(con_ssh):
            CliAuth.set_vars(HTTPS=True)

        auth_url = CliAuth.get_var('OS_AUTH_URL')
        Tenant.set_platform_url(auth_url)
        setups.set_region(region=None)
        if ProjVar.get_var('IS_DC'):
            Tenant.set_platform_url(url=auth_url, central_region=True)
        initialized = True
def base_url(self):
    from consts.auth import CliAuth
    prefix = 'http'
    if CliAuth.get_var('HTTPS'):
        prefix = 'https'
    oam_ip = ProjVar.get_var("LAB")['floating ip']
    if not self.port:
        self.port = 8080 if prefix == 'http' else 8443
    base_url = '{}://{}:{}'.format(prefix, oam_ip, self.port)  # horizon url
    if not base_url.endswith('/'):
        base_url += '/'
    return base_url
def get_url(dnsname=False):
    """
    Get the base url of the Horizon application

    Args:
        dnsname (bool): True to return the dns name of the host instead of
            the IP

    Returns (str): the url on the active controller to access Horizon
    """
    domain = common.get_lab_fip(region='RegionOne') if not dnsname else \
        common.get_dnsname(region='RegionOne')
    prefix = 'https' if CliAuth.get_var('HTTPS') else 'http'
    port = 8080 if prefix == 'http' else 8443
    return '{}://{}:{}'.format(prefix, domain, port)
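# A minimal usage sketch of get_url above; it assumes the 'requests' package
# is available and that Horizon is reachable from the test host. verify=False
# because labs may use self-signed certificates.
import requests

horizon_url = get_url(dnsname=False)
resp = requests.get(horizon_url, verify=False, timeout=30)
print(horizon_url, resp.status_code)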
def driver(request):
    auth_info = Tenant.get('admin_platform')
    if CliAuth.get_var('HTTPS') and container_helper.is_stx_openstack_deployed(
            auth_info=auth_info):
        openstack_domain = system_helper.get_service_parameter_values(
            service='openstack', section='helm', name='endpoint_domain',
            auth_info=auth_info)
        domain = openstack_domain[0] if openstack_domain else None
        ProjVar.set_var(openstack_domain=domain)

    driver_ = HorizonDriver.get_driver()

    def teardown():
        HorizonDriver.quit_driver()

    request.addfinalizer(teardown)
    return driver_
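# A minimal usage sketch of the driver fixture above, assuming
# HorizonDriver.get_driver() returns a Selenium WebDriver (get() and title
# are standard WebDriver members) and that the get_url() helper shown earlier
# is importable here; the test name is illustrative only.
def test_horizon_landing_page_example(driver):
    driver.get(get_url(dnsname=False))
    # The Horizon login page should load and set a page title
    assert driver.title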
def _test_firewall_rules_default():
    """
    Verify default ports are open.

    Test Steps:
        - Confirm iptables service is running on active controller
        - Check if lab is http(s), add corresponding port to check
        - Confirm the default ports are open
        - Swact and repeat the above steps
    """
    # Cannot test connecting to the ports as they are in use.
    default_ports = [123, 161, 199, 5000, 6080, 6385, 8000, 8003, 8004, 8041,
                     8774, 8776, 8778, 9292, 9696, 15491]

    from consts.proj_vars import ProjVar
    region = ProjVar.get_var('REGION')
    if region != 'RegionOne' and region in MULTI_REGION_MAP:
        default_ports.remove(5000)
        default_ports.remove(9292)

    # Add the horizon port that matches the lab's http/https configuration
    default_ports.append(8443 if CliAuth.get_var('HTTPS') else 8080)

    active_controller = system_helper.get_active_controller_name()
    con_ssh = ControllerClient.get_active_controller()
    _verify_iptables_status(con_ssh, active_controller)
    _check_ports_with_netstat(con_ssh, active_controller, default_ports)

    active_controller, new_active = \
        system_helper.get_active_standby_controllers()
    if new_active:
        LOG.tc_step(
            "Swact {} and verify firewall rules".format(active_controller))
        host_helper.swact_host(active_controller)
        con_ssh = ControllerClient.get_active_controller()
        _verify_iptables_status(con_ssh, new_active)
        _check_ports_with_netstat(con_ssh, new_active, default_ports)
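# Hypothetical sketch of what a port check such as _check_ports_with_netstat
# could look like; the real helper is not shown in this section, so the
# command and matching logic below are assumptions.
def _check_ports_with_netstat_sketch(con_ssh, host, ports):
    # List listening TCP/UDP sockets once, then look for each expected port
    output = con_ssh.exec_cmd('netstat -lntu', fail_ok=False)[1]
    lines = output.splitlines()
    for port in ports:
        assert any(':{}'.format(port) in line for line in lines), \
            "Port {} is not listening on {}".format(port, host)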
def __init__(self, serviceName, platform=False):
    """
    Initiate an object for handling REST calls.

    Args:
        serviceName (str): name of the service whose public endpoint is used
            as the base URL for the REST calls
        platform (bool): whether to authenticate with the platform admin
            credentials instead of the containerized admin
    """
    auth_info = Tenant.get('admin_platform') if platform else \
        Tenant.get('admin')
    self.token = ""
    self.token_payload = ""
    self.region = ProjVar.get_var('REGION')
    self.baseURL = keystone_helper.get_endpoints(
        field='URL', service_name=serviceName, interface="public",
        region=self.region, auth_info=auth_info)[0]
    self.ksURL = keystone_helper.get_endpoints(
        field='URL', service_name='keystone', interface="public",
        region=self.region, auth_info=auth_info)[0]
    self.cert_path = None
    self.verify = True
    self.is_https = CliAuth.get_var('HTTPS')
    if self.is_https:
        self.verify = False
        cert_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                 'server-with-key.pem')
        if not os.path.exists(cert_path):
            cert_path = security_helper.fetch_cert_file(scp_to_local=True)
        self.cert_path = cert_path
        if cert_path:
            self.verify = cert_path
    self.generate_token_request()
    self.retrieve_token('/auth/tokens')
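# A minimal usage sketch of the constructor above; the enclosing class name
# is not shown in the snippet, so 'Rest' is an assumption, as is the 'sysinv'
# service name.
client = Rest('sysinv', platform=True)
print(client.baseURL)  # public endpoint of the requested service
print(client.ksURL)    # public keystone endpoint used for token requests
print(client.verify)   # cert path (or False) on https labs, True on http labs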
def modify_https(enable_https=True, check_first=True, con_ssh=None,
                 auth_info=Tenant.get('admin_platform'), fail_ok=False):
    """
    Modify platform https via 'system modify https_enable=<bool>'

    Args:
        enable_https (bool): True/False to enable https or not
        check_first (bool): whether to check if the lab is already in the
            requested state before modifying
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool):

    Returns (tuple):
        (-1, msg)
        (0, msg)
        (1, <std_err>)
    """
    if check_first:
        is_https = keystone_helper.is_https_enabled(source_openrc=False,
                                                    auth_info=auth_info,
                                                    con_ssh=con_ssh)
        if (is_https and enable_https) or (not is_https and not enable_https):
            msg = "Https is already {}. Do nothing.".format(
                'enabled' if enable_https else 'disabled')
            LOG.info(msg)
            return -1, msg

    LOG.info("Modify system to {} https".format(
        'enable' if enable_https else 'disable'))
    res, output = system_helper.modify_system(
        fail_ok=fail_ok, con_ssh=con_ssh, auth_info=auth_info,
        https_enabled='{}'.format(str(enable_https).lower()))
    if res == 1:
        return 1, output

    LOG.info("Wait up to 60s for config out-of-date alarm with best effort.")
    system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                 entity_id='controller-', strict=False,
                                 con_ssh=con_ssh, timeout=60, fail_ok=True,
                                 auth_info=auth_info)

    LOG.info("Wait up to 600s for config out-of-date alarm to clear.")
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE,
                                      con_ssh=con_ssh, timeout=600,
                                      check_interval=20, fail_ok=False,
                                      auth_info=auth_info)

    LOG.info("Wait up to 300s for public endpoints to be updated")
    expt_status = 'enabled' if enable_https else 'disabled'
    end_time = time.time() + 300
    while time.time() < end_time:
        if keystone_helper.is_https_enabled(
                con_ssh=con_ssh, source_openrc=False,
                auth_info=auth_info) == enable_https:
            break
        time.sleep(10)
    else:
        raise exceptions.KeystoneError(
            "Https is not {} in 'openstack endpoint list'".format(expt_status))

    msg = 'Https is {} successfully'.format(expt_status)
    LOG.info(msg)
    # TODO: install certificate for https. There will be a warning msg if
    #  self-signed certificate is used

    if not ProjVar.get_var('IS_DC') or \
            (auth_info and auth_info.get('region', None) in
             ('RegionOne', 'SystemController')):
        # If DC, use the central region https as system https, since that is
        # the one used for external access
        CliAuth.set_vars(HTTPS=enable_https)

    return 0, msg
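# A minimal usage sketch of modify_https above; the return codes follow the
# docstring: -1 when https is already in the requested state, 0 on success.
code, msg = modify_https(enable_https=True, check_first=True)
assert code in (-1, 0), msg
print(msg)  # e.g. 'Https is enabled successfully'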
def configure_dovetail_server(hosts_per_personality):
    """
    - Update env_config.sh on dovetail test node
    - Update tempest_conf.yaml min_compute_nodes count
    - Update nova-api process count in docker overlay monitor_process.py
    - Update monitor.py
    - Create pod.yaml file on localhost and scp to dovetail test node

    Args:
        hosts_per_personality:
    """
    con_ssh = ControllerClient.get_active_controller()
    # # Do not modify the tool
    # nova_proc_count = int(con_ssh.exec_cmd('ps -fC nova-api | grep nova | wc -l')[1])
    # assert nova_proc_count > 0, "0 nova-api processes running on active controller"

    LOG.fixture_step("Update {} on dovetail test node".format(Dovetail.ENV_SH))
    admin_dict = Tenant.get('admin')
    tenant_name = admin_dict['tenant']
    keystone_public_url = keystone_helper.get_endpoints(
        service_name='keystone', interface='public',
        region=admin_dict['region'], field='url')[0]

    env_conf_dict = {
        'OS_PROJECT_NAME': tenant_name,
        'OS_PROJECT_ID': keystone_helper.get_projects(field='ID',
                                                      name=tenant_name)[0],
        'OS_TENANT_NAME': tenant_name,
        'OS_USERNAME': admin_dict['user'],
        'OS_PASSWORD': admin_dict['password'],
        'OS_AUTH_URL': keystone_public_url.replace(':', r'\:').replace(
            r'/', r'\/'),
        'OS_IDENTITY_API_VERSION': CliAuth.get_var('OS_IDENTITY_API_VERSION'),
    }
    Dovetail.set_auth_url(keystone_public_url)

    ComplianceCreds.set_host(Dovetail.TEST_NODE)
    ComplianceCreds.set_user(Dovetail.USERNAME)
    ComplianceCreds.set_password(Dovetail.PASSWORD)
    with compliance_helper.ssh_to_compliance_server() as compliance_ssh:
        env_path = Dovetail.ENV_SH
        for var, value in env_conf_dict.items():
            compliance_ssh.exec_cmd(
                'sed -i "s/^export {}=.*/export {}={}/g" {}'.format(
                    var, var, value, env_path))
            compliance_ssh.exec_cmd(
                'grep "export {}={}" {}'.format(var, value, env_path),
                fail_ok=False)

        LOG.fixture_step("Update tempest_conf.yaml min_compute_nodes count")
        compliance_ssh.exec_sudo_cmd(
            'sed -i "s/^ min_compute_nodes:.*/ min_compute_nodes: {}/g" {}'.
            format(len(hosts_per_personality['compute']),
                   Dovetail.TEMPEST_YAML))

        # # Do not modify the tool
        # LOG.fixture_step("Update nova-api process count in docker overlay monitor_process.py")
        # file_path = compliance_ssh.exec_sudo_cmd("find / -name monitor_process.py")[1]
        # LOG.fixture_step('Fixing monitor.py located at {}'.format(file_path))
        # compliance_ssh.exec_sudo_cmd("sed -ie 's/processes=.*/processes={}/g' {}".format(nova_proc_count, file_path))

        compliance_helper.add_route_for_vm_access(compliance_ssh)

    LOG.fixture_step("Collect hosts info, create pod.yaml file on localhost "
                     "and scp to dovetail test node")
    import yaml
    yaml_nodes = []
    controllers = hosts_per_personality['controller']
    computes = hosts_per_personality['compute']
    node_count = 1
    for host in controllers:
        node_ip = con_ssh.exec_cmd(
            'nslookup {} | grep -A 2 "Name:" | grep --color=never '
            '"Address:"'.format(host),
            fail_ok=False)[1].split('Address:')[1].strip()
        yaml_nodes.append({'name': 'node{}'.format(node_count),
                           'role': 'Controller',
                           'ip': node_ip,
                           'user': '******',
                           'password': HostLinuxUser.get_password()})
        node_count += 1

    for compute in computes:
        node_ip = con_ssh.exec_cmd(
            'nslookup {} | grep -A 2 "Name:" | grep --color=never '
            '"Address:"'.format(compute),
            fail_ok=False)[1].split('Address:')[1].strip()
        yaml_nodes.append({'name': 'node{}'.format(node_count),
                           'role': 'Compute',
                           'ip': node_ip,
                           'user': '******',
                           'password': HostLinuxUser.get_password()})
        node_count += 1

    pod_yaml_dict = {'nodes': yaml_nodes}
    local_path = '{}/pod.yaml'.format(ProjVar.get_var('TEMP_DIR'))
    with open(local_path, 'w') as f:
        yaml.dump(pod_yaml_dict, f, default_flow_style=False)

    common.scp_from_local(source_path=local_path, dest_path=Dovetail.POD_YAML,
                          dest_ip=Dovetail.TEST_NODE,
                          dest_user=Dovetail.USERNAME,
                          dest_password=Dovetail.PASSWORD, timeout=30)
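# A minimal sketch of the pod.yaml content generated above, using one node of
# each role; the IP addresses and the password placeholder are illustrative
# assumptions, and the user value is masked exactly as in the code above.
import yaml

example_pod = {'nodes': [
    {'name': 'node1', 'role': 'Controller', 'ip': '192.168.204.3',
     'user': '******', 'password': '<host linux password>'},
    {'name': 'node2', 'role': 'Compute', 'ip': '192.168.204.5',
     'user': '******', 'password': '<host linux password>'},
]}
print(yaml.dump(example_pod, default_flow_style=False))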