def revert_https(request):
    """Fixture: record current https config of central region and subcloud,
    and restore both on teardown.

    Args:
        request: pytest fixture request object.

    Returns (tuple): (origin_https_sub, origin_https_central,
        central_auth, sub_auth)
    """
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')
    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(
        auth_info=central_auth)

    def _revert():
        LOG.fixture_step(
            "Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central,
                                     auth_info=central_auth)
        LOG.fixture_step(
            "Revert subcloud https config to {}.".format(origin_https_sub))
        # Bug fix: restore the subcloud's OWN original setting. Previously
        # this passed origin_https_central, which could leave the subcloud
        # in the wrong https mode after teardown.
        security_helper.modify_https(enable_https=origin_https_sub,
                                     auth_info=sub_auth)
        # Note: the message has no placeholder, so the previous
        # .format(origin_https_sub) call was a no-op and is removed.
        LOG.fixture_step("Verify cli's on subcloud and central region.")
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)
    return origin_https_sub, origin_https_central, central_auth, sub_auth
def revert_https(request):
    """
    Fixture to get the current https mode of the system and, if the test
    fails, leave the system in the same mode as before.

    Args:
        request: pytest fixture request object.

    Returns (tuple): (origin_https_sub, origin_https_central,
        central_auth, sub_auth, use_dnsname)
    """
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform')
    # DNS names are usable only if both central region and primary subcloud
    # have one configured.
    use_dnsname = (bool(common.get_dnsname()) and
                   bool(common.get_dnsname(
                       region=ProjVar.get_var('PRIMARY_SUBCLOUD'))))
    origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth)
    origin_https_central = keystone_helper.is_https_enabled(
        auth_info=central_auth)

    def _revert():
        LOG.fixture_step(
            "Revert central https config to {}.".format(origin_https_central))
        security_helper.modify_https(enable_https=origin_https_central,
                                     auth_info=central_auth)
        LOG.fixture_step(
            "Revert subcloud https config to {}.".format(origin_https_sub))
        # Bug fix: restore the subcloud's OWN original setting. Previously
        # this passed origin_https_central, which could leave the subcloud
        # in the wrong https mode after teardown.
        security_helper.modify_https(enable_https=origin_https_sub,
                                     auth_info=sub_auth)
        # Note: the message has no placeholder, so the previous
        # .format(origin_https_sub) call was a no-op and is removed.
        LOG.fixture_step("Verify cli's on subcloud and central region.")
        verify_cli(sub_auth, central_auth)

    request.addfinalizer(_revert)
    return (origin_https_sub, origin_https_central, central_auth, sub_auth,
            use_dnsname)
def create_url(ip=None, port=None, version=None, extension=None):
    """
    Creates a url with the given parameters in the form:
    http(s)://<ip address>:<port>/<version>/<extension>

    Args:
        ip (str): the main ip address. If set to None will be set to the
            lab's ip address by default.
        port (int): the port number to connect to.
        version (str): for REST API. version number, e.g. "v1", "v2.0"
        extension (str): extensions to add to the url

    Returns (str): a url created with the given parameters
    """
    # Scheme follows the lab's current https configuration.
    # (Was `is_https_enabled() is True`; direct truth test is idiomatic.)
    if keystone_helper.is_https_enabled():
        url = 'https://'
    else:
        url = 'http://'
    if ip:
        url += ip
    else:
        url += get_ip_addr()
    if port:
        url += ':{}'.format(port)
    if version:
        url += '/{}'.format(version)
    if extension:
        url += '/{}'.format(extension)
    return url
def test_remote_cli():
    """Download openrc files for admin/tenant1/tenant2 from Horizon, then
    smoke-test a set of remote CLI clients against the lab.

    Steps:
        - Log in to Horizon as admin and download the v3 openrc file for
          each of admin, tenant1 and tenant2 (switching project in between)
        - Assert each file landed in <LOG_DIR>/horizon
        - Instantiate a remote CLI client and exercise nova/cinder/glance/
          system/ceilometer/keystone helper calls through it
    """
    LOG.info("Download openrc files from horizon")
    horizon_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon')
    tenant1 = Tenant.get('tenant1')['tenant']
    tenant2 = Tenant.get('tenant2')['tenant']
    # Horizon names the downloaded file "<project>-openrc.sh"
    admin_openrc = '{}-openrc.sh'.format(Tenant.get('admin')['tenant'])
    tenant1_openrc = '{}-openrc.sh'.format(tenant1)
    tenant2_openrc = '{}-openrc.sh'.format(tenant2)
    # from utils.horizon.pages.project.apiaccesspage import ApiAccessPage
    # Local import: only needed when this test actually runs.
    from utils.horizon.pages import loginpage
    driver = HorizonDriver.get_driver()
    login_pg = loginpage.LoginPage(driver)
    login_pg.go_to_target_page()
    home_pg = login_pg.login('admin', 'Li69nux*')
    home_pg.download_rc_v3()
    # api_access_page = ApiAccessPage(home_pg.driver)
    # api_access_page.go_to_target_page()
    # api_access_page.download_openstack_rc_file()
    assert os.path.exists(os.path.join(horizon_dir, admin_openrc)), \
        "{} not found after download".format(admin_openrc)
    # api_access_page.change_project(name=tenant1)
    # api_access_page.download_openstack_rc_file()
    home_pg.change_project(name=tenant1)
    home_pg.download_rc_v3()
    assert os.path.exists(os.path.join(horizon_dir, tenant1_openrc)), \
        "{} not found after download".format(tenant1_openrc)
    # api_access_page.change_project(name=tenant2)
    # api_access_page.download_openstack_rc_file()
    home_pg.change_project(name=tenant2)
    home_pg.download_rc_v3()
    assert os.path.exists(os.path.join(horizon_dir, tenant2_openrc)), \
        "{} not found after download".format(tenant2_openrc)
    # Exercise various helpers via the remote CLI client; return values are
    # deliberately ignored — success is "no exception raised".
    RemoteCLIClient.get_remote_cli_client()
    nova_helper.get_basic_flavor()
    cinder_helper.get_volume_qos_list()
    glance_helper.get_images()
    system_helper.get_computes()
    ceilometer_helper.get_alarms()
    keystone_helper.is_https_enabled()
def https_config(request):
    """Fixture: capture whether https is currently enabled.

    On teardown, switches the system back to its original mode — but only
    when https was originally disabled (enabling tests are expected to
    turn it back off).

    Returns (bool): True if https was enabled when the fixture ran.
    """
    https_enabled = keystone_helper.is_https_enabled()

    def _restore():
        if https_enabled:
            return
        state = 'enabled' if https_enabled else 'disabled'
        LOG.fixture_step("Revert system to https {}.".format(state))
        security_helper.modify_https(enable_https=https_enabled)

    request.addfinalizer(_restore)
    return https_enabled
def test_dc_modify_https(revert_https):
    """
    Test enable/disable https

    Test Steps:
        - Ensure central region https to be different than subcloud
        - Wait for subcloud sync audit and ensure subcloud https is not
          changed
        - Verify cli's in subcloud and central region
        - Modify https on central and subcloud
        - Verify cli's in subcloud and central region

    Teardown:
        - Revert https config on central and subcloud
    """
    origin_https_sub, origin_https_central, central_auth, sub_auth = \
        revert_https
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
    # Flip both configs so the test always exercises a change.
    new_https_sub = not origin_https_sub
    new_https_central = not origin_https_central

    # Setting central to the subcloud's NEW value guarantees central differs
    # from the subcloud's current (original) config.
    LOG.tc_step(
        "Ensure central region https to be different than {}".format(subcloud))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=central_auth)

    # The sync audit must NOT propagate the https setting to the subcloud.
    LOG.tc_step(
        "Wait for subcloud sync audit and ensure {} https is not changed".
        format(subcloud))
    dc_helper.wait_for_sync_audit(subclouds=subcloud)
    assert origin_https_sub == keystone_helper.is_https_enabled(
        auth_info=sub_auth), "HTTPS config changed in subcloud"

    LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
    verify_cli(sub_auth, central_auth)

    # Central already matches new_https_sub; only modify again if the two
    # new target values differ.
    if new_https_central != new_https_sub:
        LOG.tc_step("Set central region https to {}".format(new_https_central))
        security_helper.modify_https(enable_https=new_https_central,
                                     auth_info=central_auth)

    LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=sub_auth)

    LOG.tc_step(
        "Verify cli's in {} and central region after https modify on subcloud".
        format(subcloud))
    verify_cli(sub_auth, central_auth)
def check_lab_status(request):
    """Fixture: skip unless the lab is TPM-installed and https-enabled,
    back up the default ssl pem file on the active controller, and register
    cleanup of the working copies and saved configuration files.

    Relies on module-level names: testing_ssl_file, default_ssl_file,
    conf_backup_dir, local_conf_backup_dir.
    """
    current_lab = ProjVar.get_var('lab')
    if not current_lab or not current_lab.get('tpm_installed', False):
        skip('Non-TPM lab, skip the test.')
    if not keystone_helper.is_https_enabled():
        skip('Non-HTTPs lab, skip the test.')
    ssh_client = ControllerClient.get_active_controller()
    working_ssl_file = os.path.join(HostLinuxUser.get_home(),
                                    testing_ssl_file)
    LOG.info('backup default ssl pem file to:' + working_ssl_file)
    # NOTE(review): the cp target is testing_ssl_file (relative), while
    # cleanup removes working_ssl_file (home-anchored). This presumably
    # works because the remote shell's cwd is the home directory — TODO
    # confirm; otherwise the backup lands elsewhere and is never removed.
    ssh_client.exec_sudo_cmd('cp -f ' + default_ssl_file + ' ' +
                             testing_ssl_file)

    def cleaup():
        # Remove the remote working copy and backup dir, then the local
        # saved configuration directory if it exists.
        ssh_client.exec_sudo_cmd('rm -rf ' + working_ssl_file)
        backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
        ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir)
        LOG.info('remove saved configuration files on local')
        if os.path.exists(local_conf_backup_dir):
            shutil.rmtree(local_conf_backup_dir)

    request.addfinalizer(cleaup)
def test_dc_modify_https(revert_https):
    """
    Test enable/disable https

    Test Steps:
        - Ensure central region and subcloud admin endpoint are https
        - Ensure central region https to be different than subcloud
        - Wait for subcloud sync audit and ensure subcloud https is not
          changed
        - Verify cli's in subcloud and central region
        - Modify https on central and subcloud
        - Verify cli's in subcloud and central region
        - swact central and subcloud
        - Ensure central region and subcloud admin endpoint are https

    Teardown:
        - Revert https config on central and subcloud
    """
    origin_https_sub, origin_https_central, central_auth, sub_auth, \
        use_dnsname = revert_https
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    # Admin internal endpoints must stay https regardless of the public
    # https_enabled setting exercised by this test.
    LOG.tc_step(
        "Before testing, Ensure central region and subcloud admin internal "
        "endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    # Flip both configs so the test always exercises a change.
    new_https_sub = not origin_https_sub
    new_https_central = not origin_https_central

    # Setting central to the subcloud's NEW value guarantees central differs
    # from the subcloud's current (original) config.
    LOG.tc_step(
        "Ensure central region https to be different than {}".format(subcloud))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=central_auth)

    LOG.tc_step('Check public endpoints accessibility for central region')
    security_helper.check_services_access(region='RegionOne',
                                          auth_info=central_auth,
                                          use_dnsname=use_dnsname)
    LOG.tc_step('Check platform horizon accessibility')
    security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    # Best-effort audit wait: the key check is that the subcloud's https
    # setting is NOT synced from the central region.
    LOG.tc_step("Wait for subcloud sync audit with best effort and ensure {} "
                "https is not changed".format(subcloud))
    dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True,
                                  timeout=660)
    assert origin_https_sub == keystone_helper.is_https_enabled(
        auth_info=sub_auth), "HTTPS config changed in subcloud"

    LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
    verify_cli(sub_auth, central_auth)

    # Central already matches new_https_sub; only modify again if the two
    # new target values differ.
    if new_https_central != new_https_sub:
        LOG.tc_step("Set central region https to {}".format(new_https_central))
        security_helper.modify_https(enable_https=new_https_central,
                                     auth_info=central_auth)

    LOG.tc_step("Ensure central region and subcloud admin internal endpoint "
                "are still https")
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    LOG.tc_step('Check public endpoints accessibility for central region')
    security_helper.check_services_access(region='RegionOne',
                                          auth_info=central_auth,
                                          use_dnsname=use_dnsname)
    LOG.tc_step('Check platform horizon accessibility')
    security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=sub_auth)
    LOG.tc_step('Check public endpoints accessibility for {} region'.format(
        subcloud))
    security_helper.check_services_access(region=subcloud, auth_info=sub_auth,
                                          use_dnsname=use_dnsname)

    LOG.tc_step("Ensure central region and subcloud admin internal endpoint "
                "are still https")
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    LOG.tc_step("Verify cli's in {} and central region after https modify on "
                "subcloud".format(subcloud))
    verify_cli(sub_auth, central_auth)

    LOG.tc_step("Swact on central region")
    host_helper.swact_host(auth_info=central_auth)

    LOG.tc_step(
        "Verify cli's in {} and central region after central region swact"
        .format(subcloud))
    verify_cli(sub_auth, central_auth)

    # Simplex subclouds have a single controller — nothing to swact to.
    if not system_helper.is_aio_simplex(auth_info=sub_auth):
        LOG.tc_step("Swact on subcloud {}".format(subcloud))
        host_helper.swact_host(auth_info=sub_auth)
        LOG.tc_step("Verify cli's in {} and central region after subcloud "
                    "swact".format(subcloud))
        verify_cli(sub_auth, central_auth)

    LOG.tc_step("Ensure after swact, central region and subcloud admin "
                "internal endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin',
                                            auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"
def modify_https(enable_https=True, check_first=True, con_ssh=None,
                 auth_info=Tenant.get('admin_platform'), fail_ok=False):
    """
    Modify platform https via 'system modify https_enable=<bool>'

    Args:
        enable_https (bool): True/False to enable https or not
        check_first (bool): if user want to check if the lab is already in
            the state that user try to enable
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool):

    Returns (tuple):
        (-1, msg)   -- already in requested state, nothing done
        (0, msg)    -- modified successfully
        (1, <std_err>)  -- system modify failed (only when fail_ok=True)

    Raises:
        exceptions.KeystoneError: if the endpoints do not reflect the
            requested https state within 300s.
    """
    # NOTE(review): the auth_info default is evaluated once at import time
    # (call in the signature) — presumably deliberate project convention.
    if check_first:
        is_https = keystone_helper.is_https_enabled(source_openrc=False,
                                                    auth_info=auth_info,
                                                    con_ssh=con_ssh)
        # Short-circuit when the lab is already in the requested state.
        if (is_https and enable_https) or (not is_https and not enable_https):
            msg = "Https is already {}. Do nothing.".format(
                'enabled' if enable_https else 'disabled')
            LOG.info(msg)
            return -1, msg

    LOG.info("Modify system to {} https".format(
        'enable' if enable_https else 'disable'))
    # CLI expects lowercase 'true'/'false'.
    res, output = system_helper.modify_system(fail_ok=fail_ok, con_ssh=con_ssh,
                                              auth_info=auth_info,
                                              https_enabled='{}'.format(
                                                  str(enable_https).lower()))
    if res == 1:
        return 1, output

    # The config out-of-date alarm may be raised and cleared quickly, so the
    # first wait is best-effort (fail_ok=True); the clear wait is strict.
    LOG.info("Wait up to 60s for config out-of-date alarm with best effort.")
    system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                 entity_id='controller-', strict=False,
                                 con_ssh=con_ssh, timeout=60, fail_ok=True,
                                 auth_info=auth_info)

    LOG.info("Wait up to 600s for config out-of-date alarm to clear.")
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE,
                                      con_ssh=con_ssh, timeout=600,
                                      check_interval=20, fail_ok=False,
                                      auth_info=auth_info)

    # Poll endpoint list until it reflects the new https state; the while/
    # else raises only when the 300s deadline expires without a break.
    LOG.info("Wait up to 300s for public endpoints to be updated")
    expt_status = 'enabled' if enable_https else 'disabled'
    end_time = time.time() + 300
    while time.time() < end_time:
        if keystone_helper.is_https_enabled(con_ssh=con_ssh,
                                            source_openrc=False,
                                            auth_info=auth_info) == \
                enable_https:
            break
        time.sleep(10)
    else:
        raise exceptions.KeystoneError(
            "Https is not {} in 'openstack endpoint list'".format(expt_status))

    msg = 'Https is {} successfully'.format(expt_status)
    LOG.info(msg)
    # TODO: install certificate for https. There will be a warning msg if
    # self-signed certificate is used
    if not ProjVar.get_var('IS_DC') or \
            (auth_info and auth_info.get('region', None) in (
                'RegionOne', 'SystemController')):
        # If DC, use the central region https as system https, since that is
        # the one used for external access
        CliAuth.set_vars(HTTPS=enable_https)
    return 0, msg
def is_https(con_ssh):
    """Return True if https is enabled on the platform.

    Thin wrapper over keystone_helper.is_https_enabled that sources openrc
    and authenticates as admin_platform.

    Args:
        con_ssh (SSHClient): connection to the active controller.

    Returns (bool): https enabled or not.
    """
    admin_auth = Tenant.get('admin_platform')
    return keystone_helper.is_https_enabled(con_ssh=con_ssh,
                                            source_openrc=True,
                                            auth_info=admin_auth)
def test_launch_app_via_sysinv(copy_test_apps, cleanup_app):
    """
    Test upload, apply, remove, delete custom app via system cmd

    Args:
        copy_test_apps (str): module fixture
        cleanup_app: fixture

    Setups:
        - Copy test files from test server to stx system (module)
        - Remove and delete test app if exists

    Test Steps:
        - system application-upload test app tar file and wait for it to be
          uploaded
        - system application-apply test app and wait for it to be applied
        - wget <oam_ip>:<app_targetPort> from remote host
        - Verify app contains expected content
        - system application-remove test app and wait for it to be
          uninstalled
        - system application-delete test app from system
    """
    app_dir = copy_test_apps
    app_name = HELM_APP_NAME

    LOG.tc_step("Upload {} helm charts".format(app_name))
    container_helper.upload_app(app_name=app_name, app_version='1.0',
                                tar_file=os.path.join(app_dir, HELM_TAR))

    LOG.tc_step("Apply {}".format(app_name))
    container_helper.apply_app(app_name=app_name)

    # Resolve the service's nodePort via kubectl jsonpath so the app can be
    # reached through the OAM floating IP.
    LOG.tc_step("wget app via <oam_ip>:<targetPort>")
    json_path = '{.spec.ports[0].nodePort}'
    node_port = kube_helper.get_pod_value_jsonpath(
        type_name='service/{}'.format(HELM_POD_FULL_NAME), jsonpath=json_path)
    assert re.match(r'\d+', node_port), "Unable to get nodePort via " \
                                        "jsonpath '{}'".format(json_path)

    localhost = LocalHostClient(connect=True)
    # Scheme must match the lab's current https configuration.
    prefix = 'https' if keystone_helper.is_https_enabled() else 'http'
    oam_ip = ProjVar.get_var('LAB')['floating ip']
    output_file = '{}/{}.html'.format(ProjVar.get_var('TEMP_DIR'),
                                      HELM_APP_NAME)
    localhost.exec_cmd('wget {}://{}:{} -O {}'.format(prefix, oam_ip,
                                                      node_port, output_file),
                       fail_ok=False)

    LOG.tc_step("Verify app contains expected content")
    # 'echo' appends a newline so the command output is cleanly terminated.
    app_content = localhost.exec_cmd('cat {}; echo'.format(output_file),
                                     get_exit_code=False)[1]
    assert app_content.startswith(HELM_MSG), \
        "App does not start with expected message."

    LOG.tc_step("Remove applied app")
    container_helper.remove_app(app_name=app_name)

    LOG.tc_step("Delete uninstalled app")
    container_helper.delete_app(app_name=app_name)

    LOG.tc_step("Wait for pod terminate")
    kube_helper.wait_for_resources_gone(resource_names=HELM_POD_FULL_NAME,
                                        check_interval=10,
                                        namespace='default')