def driver(request):
    """Pytest fixture returning a Horizon web driver.

    When HTTPS is enabled and the stx-openstack application is deployed,
    the helm 'endpoint_domain' service parameter is looked up first and
    stored via ProjVar so the driver targets the right Horizon domain.
    The driver is quit automatically when the requesting scope ends.
    """
    platform_auth = Tenant.get('admin_platform')
    https_enabled = CliAuth.get_var('HTTPS')
    if https_enabled and container_helper.is_stx_openstack_deployed(
            auth_info=platform_auth):
        domain_values = system_helper.get_service_parameter_values(
            service='openstack', section='helm', name='endpoint_domain',
            auth_info=platform_auth)
        if domain_values:
            endpoint_domain = domain_values[0]
        else:
            endpoint_domain = None
        ProjVar.set_var(openstack_domain=endpoint_domain)

    horizon_driver = HorizonDriver.get_driver()
    # Quit the browser session on fixture teardown.
    request.addfinalizer(HorizonDriver.quit_driver)
    return horizon_driver
def pytest_collectstart():
    """
    Set up the ssh session at collectstart. Because skipif condition is
    evaluated at the collecting test cases phase.
    """
    # Module-level guard so the ssh/auth setup runs only once even though
    # pytest calls this hook for every collected file.
    global initialized
    if not initialized:
        global con_ssh
        # Establish the controller ssh session and record it globally.
        con_ssh = setups.setup_tis_ssh(ProjVar.get_var("LAB"))
        ProjVar.set_var(con_ssh=con_ssh)
        # Load keystone credentials from the openrc file on the controller.
        CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh))
        if setups.is_https(con_ssh):
            CliAuth.set_vars(HTTPS=True)

        auth_url = CliAuth.get_var('OS_AUTH_URL')
        Tenant.set_platform_url(auth_url)
        setups.set_region(region=None)
        # Distributed-cloud labs also need the central-region auth URL.
        if ProjVar.get_var('IS_DC'):
            Tenant.set_platform_url(url=auth_url, central_region=True)
        initialized = True
def __get_lab_ssh(labname, log_dir=None):
    """Open and return an SSH connection to the given lab's floating IP.

    Args:
        labname: short name of the lab to resolve via get_lab_dict.
        log_dir: optional log directory; recorded in ProjVar when given.

    Returns (SSHClient): a connected ssh client to the lab controller.
    """
    lab_dict = get_lab_dict(labname)
    # Log saving is optional for this helper.
    if log_dir is not None:
        ProjVar.set_var(log_dir=log_dir)
    ProjVar.set_var(lab=lab_dict)
    ProjVar.set_var(source_openrc=True)

    ssh_client = SSHClient(lab_dict.get('floating ip'),
                           HostLinuxUser.get_user(),
                           HostLinuxUser.get_password(),
                           CONTROLLER_PROMPT)
    ssh_client.connect()
    return ssh_client
def pytest_configure(config):
    """pytest hook: register custom markers and initialize session-wide
    state (lab/natbox/tenant selection, log directory, repeat/stress
    counts) from command-line options before tests run."""
    # Register project-specific markers so pytest recognizes them.
    config.addinivalue_line("markers",
                            "features(feature_name1, feature_name2, "
                            "...): mark impacted feature(s) for a test case.")
    config.addinivalue_line("markers",
                            "priorities(, cpe_sanity, p2, ...): mark "
                            "priorities for a test case.")
    config.addinivalue_line("markers",
                            "known_issue(LP-xxxx): mark known issue with "
                            "LP ID or description if no LP needed.")

    # Nothing to configure when only --help output was requested.
    if config.getoption('help'):
        return

    # Common reporting params
    collect_all = config.getoption('collectall')
    always_collect = config.getoption('alwayscollect')
    session_log_dir = config.getoption('sessiondir')
    resultlog = config.getoption('resultlog')

    # Test case params on installed system
    testcase_config = config.getoption('testcase_config')
    lab_arg = config.getoption('lab')
    natbox_arg = config.getoption('natbox')
    tenant_arg = config.getoption('tenant')
    horizon_visible = config.getoption('horizon_visible')
    is_vbox = config.getoption('is_vbox')

    global repeat_count
    repeat_count = config.getoption('repeat')
    global stress_count
    stress_count = config.getoption('stress')
    global count
    # --repeat takes precedence over --stress when both are given.
    if repeat_count > 0:
        count = repeat_count
    elif stress_count > 0:
        count = stress_count

    global no_teardown
    no_teardown = config.getoption('noteardown')
    # Repeat runs (and explicit --noteardown) skip test teardown so the
    # failed system state is preserved for debugging.
    if repeat_count > 0 or no_teardown:
        ProjVar.set_var(NO_TEARDOWN=True)

    collect_netinfo = config.getoption('netinfo')

    # Determine lab value.
    lab = natbox = None
    if lab_arg:
        lab = setups.get_lab_dict(lab_arg)
    if natbox_arg:
        natbox = setups.get_natbox_dict(natbox_arg)
    # Testcase config file (if provided) may supply/override lab and natbox.
    lab, natbox = setups.setup_testcase_config(testcase_config, lab=lab,
                                               natbox=natbox)
    tenant = tenant_arg.upper() if tenant_arg else 'TENANT1'

    # Log collection params (normalize to plain booleans)
    collect_all = True if collect_all else False
    always_collect = True if always_collect else False

    # If floating ip cannot be reached, whether to try to ping/ssh
    # controller-0 unit IP, etc.
    if collect_netinfo:
        ProjVar.set_var(COLLECT_SYS_NET_INFO=True)

    horizon_visible = True if horizon_visible else False

    if session_log_dir:
        log_dir = session_log_dir
    else:
        # compute directory for all logs based on resultlog arg, lab,
        # and timestamp on local machine
        resultlog = resultlog if resultlog else os.path.expanduser("~")
        if '/AUTOMATION_LOGS' in resultlog:
            resultlog = resultlog.split(sep='/AUTOMATION_LOGS')[0]
        resultlog = os.path.join(resultlog, 'AUTOMATION_LOGS')
        lab_name = lab['short_name']
        time_stamp = strftime('%Y%m%d%H%M')
        log_dir = '{}/{}/{}'.format(resultlog, lab_name, time_stamp)
    os.makedirs(log_dir, exist_ok=True)

    # set global constants, which will be used for the entire test session, etc
    ProjVar.init_vars(lab=lab, natbox=natbox, logdir=log_dir, tenant=tenant,
                      collect_all=collect_all,
                      always_collect=always_collect,
                      horizon_visible=horizon_visible)

    # Distributed-cloud lab: record DC mode and the primary subcloud option.
    if lab.get('central_region'):
        ProjVar.set_var(IS_DC=True,
                        PRIMARY_SUBCLOUD=config.getoption('subcloud'))

    if is_vbox:
        ProjVar.set_var(IS_VBOX=True)

    # NOTE(review): `console_log` and `count` are module globals defined
    # outside this function — confirm they are initialized at import time.
    config_logger(log_dir, console=console_log)

    # set resultlog save location
    config.option.resultlog = ProjVar.get_var("PYTESTLOG_PATH")

    # Repeat test params: duplicate each collected file/dir `count` times
    # by mutating pytest's file_or_dir option list in place.
    file_or_dir = config.getoption('file_or_dir')
    origin_file_dir = list(file_or_dir)
    if count > 1:
        print("Repeat following tests {} times: {}".format(count,
                                                           file_or_dir))
        del file_or_dir[:]
        for f_or_d in origin_file_dir:
            for i in range(count):
                file_or_dir.append(f_or_d)
def revert():
    # Finalizer: turn remote CLI usage back on and point the user file
    # directory at the temp dir used by the remote cli clients.
    ProjVar.set_var(REMOTE_CLI=True)
    ProjVar.set_var(USER_FILE_DIR=ProjVar.get_var('TEMP_DIR'))
def setup_testcase_config(testcase_config, lab=None, natbox=None):
    """Apply an optional testcase config file to the framework's globals.

    Reads the ini-style file (auth, natbox, guest_image, guest_keypair,
    guest_networks sections) and updates lab/natbox dicts, linux and
    keystone credentials, guest image defaults, keypair paths and neutron
    network name patterns accordingly.

    Args:
        testcase_config (str|None): path to the testcase config file.
        lab (dict|None): lab dict from --lab, if given; takes precedence
            over the file's oam_floating_ip.
        natbox (dict|None): natbox dict from --natbox, if given.

    Returns (tuple): (lab, natbox) dicts after applying the config.

    Raises:
        ValueError: when no valid IPv4 OAM floating IP can be determined,
            or guest image settings are incomplete/invalid.
    """
    fip_error = 'A valid IPv4 OAM floating IP has to be specified via ' \
                'cmdline option --lab=<oam_floating_ip>, ' \
                'or testcase config file has to be provided via ' \
                '--testcase-config with oam_floating_ip ' \
                'specified under auth_platform section.'
    # Without a config file, the lab from --lab is required and used as-is.
    if not testcase_config:
        if not lab:
            raise ValueError(fip_error)
        return lab, natbox

    testcase_config = os.path.expanduser(testcase_config)
    auth_section = 'auth'
    guest_image_section = 'guest_image'
    guest_networks_section = 'guest_networks'
    guest_keypair_section = 'guest_keypair'
    natbox_section = 'natbox'

    config = configparser.ConfigParser()
    config.read(testcase_config)

    #
    # Update global variables for auth section
    #
    # Update OAM floating IP: cmdline lab wins over the config file value.
    if lab:
        fip = lab.get('floating ip')
        config.set(auth_section, 'oam_floating_ip', fip)
    else:
        fip = config.get(auth_section, 'oam_floating_ip',
                         fallback='').strip()
        lab = get_lab_dict(fip)

    if __get_ip_version(fip) != 4:
        raise ValueError(fip_error)

    # controller-0 oam ip is updated with best effort if a valid IPv4 IP is
    # provided
    if not lab.get('controller-0 ip') and config.get(
            auth_section, 'controller0_oam_ip', fallback='').strip():
        con0_ip = config.get(auth_section, 'controller0_oam_ip').strip()
        if __get_ip_version(con0_ip) == 4:
            lab['controller-0 ip'] = con0_ip
        else:
            LOG.info(
                "controller0_oam_ip specified in testcase config file is "
                "not a valid IPv4 address. Ignore.")

    # Update linux user credentials
    if config.get(auth_section, 'linux_username', fallback='').strip():
        HostLinuxUser.set_user(
            config.get(auth_section, 'linux_username').strip())
    if config.get(auth_section, 'linux_user_password',
                  fallback='').strip():
        HostLinuxUser.set_password(
            config.get(auth_section, 'linux_user_password').strip())

    # Update openstack keystone user credentials.
    # Maps config-file option prefixes to Tenant dict names.
    auth_dict_map = {
        'platform_admin': 'admin_platform',
        'admin': 'admin',
        'test1': 'tenant1',
        'test2': 'tenant2',
    }
    for conf_prefix, dict_name in auth_dict_map.items():
        kwargs = {}
        default_auth = Tenant.get(dict_name)
        conf_user = config.get(auth_section,
                               '{}_username'.format(conf_prefix),
                               fallback='').strip()
        conf_password = config.get(auth_section,
                                   '{}_password'.format(conf_prefix),
                                   fallback='').strip()
        conf_project = config.get(auth_section,
                                  '{}_project_name'.format(conf_prefix),
                                  fallback='').strip()
        conf_domain = config.get(auth_section,
                                 '{}_domain_name'.format(conf_prefix),
                                 fallback='').strip()
        conf_keypair = config.get(auth_section,
                                  '{}_nova_keypair'.format(conf_prefix),
                                  fallback='').strip()
        # Only record values that differ from the framework defaults.
        if conf_user and conf_user != default_auth.get('user'):
            kwargs['username'] = conf_user
        if conf_password and conf_password != default_auth.get('password'):
            kwargs['password'] = conf_password
        if conf_project and conf_project != default_auth.get('tenant'):
            kwargs['tenant'] = conf_project
        if conf_domain and conf_domain != default_auth.get('domain'):
            kwargs['domain'] = conf_domain
        if conf_keypair and conf_keypair != default_auth.get(
                'nova_keypair'):
            kwargs['nova_keypair'] = conf_keypair

        if kwargs:
            Tenant.update(dict_name, **kwargs)

    #
    # Update global variables for natbox section
    #
    natbox_host = config.get(natbox_section, 'natbox_host',
                             fallback='').strip()
    natbox_user = config.get(natbox_section, 'natbox_user',
                             fallback='').strip()
    natbox_password = config.get(natbox_section, 'natbox_password',
                                 fallback='').strip()
    natbox_prompt = config.get(natbox_section, 'natbox_prompt',
                               fallback='').strip()
    # Rebuild the natbox dict when the config names a different host.
    if natbox_host and (not natbox or natbox_host != natbox['ip']):
        natbox = get_natbox_dict(natbox_host, user=natbox_user,
                                 password=natbox_password,
                                 prompt=natbox_prompt)

    #
    # Update global variables for guest_image section
    #
    img_file_dir = config.get(guest_image_section, 'img_file_dir',
                              fallback='').strip()
    glance_image_name = config.get(guest_image_section, 'glance_image_name',
                                   fallback='').strip()
    img_file_name = config.get(guest_image_section, 'img_file_name',
                               fallback='').strip()
    img_disk_format = config.get(guest_image_section, 'img_disk_format',
                                 fallback='').strip()
    min_disk_size = config.get(guest_image_section, 'min_disk_size',
                               fallback='').strip()
    img_container_format = config.get(guest_image_section,
                                      'img_container_format',
                                      fallback='').strip()
    image_ssh_user = config.get(guest_image_section, 'image_ssh_user',
                                fallback='').strip()
    image_ssh_password = config.get(guest_image_section,
                                    'image_ssh_password',
                                    fallback='').strip()

    if img_file_dir and img_file_dir != GuestImages.DEFAULT['image_dir']:
        # Update default image file directory
        img_file_dir = os.path.expanduser(img_file_dir)
        if not os.path.isabs(img_file_dir):
            raise ValueError(
                "Please provide a valid absolute path for img_file_dir "
                "under guest_image section in testcase config file")
        GuestImages.DEFAULT['image_dir'] = img_file_dir

    if glance_image_name and glance_image_name != GuestImages.DEFAULT[
            'guest']:
        # Update default glance image name
        GuestImages.DEFAULT['guest'] = glance_image_name
        if glance_image_name not in GuestImages.IMAGE_FILES:
            # Add guest image info to consts.stx.GuestImages
            if not (img_file_name and img_disk_format and min_disk_size):
                raise ValueError(
                    "img_file_name and img_disk_format under guest_image "
                    "section have to be "
                    "specified in testcase config file")

            img_container_format = img_container_format if \
                img_container_format else 'bare'
            GuestImages.IMAGE_FILES[glance_image_name] = \
                (None, min_disk_size, img_file_name, img_disk_format,
                 img_container_format)

            # Add guest login credentials
            Guest.CREDS[glance_image_name] = {
                'user': image_ssh_user if image_ssh_user else 'root',
                'password':
                    image_ssh_password if image_ssh_password else None,
            }

    #
    # Update global variables for guest_keypair section
    #
    natbox_keypair_dir = config.get(guest_keypair_section,
                                    'natbox_keypair_dir',
                                    fallback='').strip()
    private_key_path = config.get(guest_keypair_section,
                                  'private_key_path',
                                  fallback='').strip()

    if natbox_keypair_dir:
        natbox_keypair_path = os.path.join(
            natbox_keypair_dir,
            'keyfile_{}.pem'.format(lab['short_name']))
        ProjVar.set_var(NATBOX_KEYFILE_PATH=natbox_keypair_path)
    if private_key_path:
        ProjVar.set_var(STX_KEYFILE_PATH=private_key_path)

    #
    # Update global variables for guest_networks section
    #
    net_name_patterns = {
        'mgmt': config.get(guest_networks_section, 'mgmt_net_name_pattern',
                           fallback='').strip(),
        'data': config.get(guest_networks_section, 'data_net_name_pattern',
                           fallback='').strip(),
        'internal': config.get(guest_networks_section,
                               'internal_net_name_pattern',
                               fallback='').strip(),
        'external': config.get(guest_networks_section,
                               'external_net_name_pattern',
                               fallback='').strip()
    }

    for net_type, net_name_pattern in net_name_patterns.items():
        if net_name_pattern:
            Networks.set_neutron_net_patterns(
                net_type=net_type, net_name_pattern=net_name_pattern)

    return lab, natbox
def set_sys_type(con_ssh):
    """Query the system type over the given ssh session and record it
    in ProjVar under SYS_TYPE."""
    ProjVar.set_var(SYS_TYPE=system_helper.get_sys_type(con_ssh=con_ssh))
def record_kpi(local_kpi_file, kpi_name, host=None, log_path=None,
               end_pattern=None, start_pattern=None, start_path=None,
               extended_regex=False, python_pattern=None,
               average_for_all=False, lab_name=None, con_ssh=None,
               sudo=False, topdown=False, init_time=None, build_id=None,
               start_host=None, uptime=5, start_pattern_init=False,
               sw_version=None, patch=None, unit=None, kpi_val=None,
               fail_ok=True):
    """
    Record kpi in ini format in given file

    Args:
        local_kpi_file (str): local file path to store the kpi data
        kpi_name (str): name of the kpi
        host (str|None): which tis host the log is located at. When None,
            assume host is active controller
        start_host (str|None): specify only if host to collect start log
            is different than host for end log
        log_path (str): log_path on given host to check the kpi
            timestamps. Required if start_time or end_time is not
            specified
        end_pattern (str): One of the two options. Option2 only applies to
            duration type of KPI
            1. pattern that signals the end or the value of the kpi.
               Used in Linux cmd 'grep'
            2. end timestamp in following format:
               e.g., 2017-01-23 12:22:59 (for duration type of KPI)
        start_pattern (str|None): One of the two options. Only required
            for duration type of the KPI, where we need to calculate the
            time delta ourselves.
            1. pattern that signals the start of the kpi.
               Used in Linux cmd 'grep'.
            2. start timestamp in following format:
               e.g., 2017-01-23 12:10:00
        start_path (str|None): log path to search for start_pattern if
            path is different than log_path for end_pattern
        extended_regex (bool): whether to use -E in grep for extended
            regex.
        python_pattern (str): Only needed for KPI that is directly taken
            from log without post processing, e.g., rate for drbd sync
        average_for_all (bool): whether to get all instances from the log
            and get average
        lab_name (str): e.g., ip_1-4, hp380
        con_ssh (SSHClient|None): ssh client of active controller
        sudo (bool): whether to access log with sudo
        topdown (bool): whether to search log from top down. Default is
            bottom up.
        init_time (str|None): when set, logs prior to this timestamp will
            be ignored.
        uptime (int|str): get load average for the previous <uptime>
            minutes via 'uptime' cmd
        start_pattern_init (bool): when set, use the timestamp of the
            start pattern as the init time for the end pattern
        sw_version (str): e.g., 17.07
        patch (str): patch name
        unit (str): unit for the kpi value if not 'Time(s)'
        kpi_val: pre-computed kpi value; when given, log parsing is
            skipped and only the timestamp is collected.
        fail_ok (bool): when True, errors are printed and returned
            instead of raised.

    Returns (tuple): (0, kpi_val) on success; (1, error_str) on failure
        when fail_ok is True.
    """
    try:
        if not lab_name:
            lab = ProjVar.get_var('LAB')
            if not lab:
                raise ValueError("lab_name needs to be provided")
        else:
            lab = lab_info.get_lab_dict(labname=lab_name)

        kpi_dict = {'lab': lab['name']}
        if start_pattern and end_pattern and build_id:
            # No need to ssh to system if both timestamps are known
            if re.match(TIMESTAMP_PATTERN, end_pattern) and re.match(
                    TIMESTAMP_PATTERN, start_pattern):
                duration = common.get_timedelta_for_isotimes(
                    time1=start_pattern,
                    time2=end_pattern).total_seconds()
                kpi_dict.update({'value': duration,
                                 'timestamp': end_pattern,
                                 'build_id': build_id})
                append_to_kpi_file(local_kpi_file=local_kpi_file,
                                   kpi_name=kpi_name, kpi_dict=kpi_dict)
                return

        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller(fail_ok=True)
            if not con_ssh:
                # No existing controller session: open a fresh ssh
                # connection to the lab's floating IP.
                if not ProjVar.get_var('LAB'):
                    ProjVar.set_var(lab=lab)
                    ProjVar.set_var(source_openrc=True)

                con_ssh = SSHClient(lab.get('floating ip'),
                                    HostLinuxUser.get_user(),
                                    HostLinuxUser.get_password(),
                                    CONTROLLER_PROMPT)
                con_ssh.connect()

        # Fill in build id / sw version from the system when not supplied.
        if not build_id or not sw_version:
            build_info = system_helper.get_build_info(con_ssh=con_ssh)
            build_id = build_id if build_id else build_info['BUILD_ID']
            sw_version = sw_version if sw_version else \
                build_info['SW_VERSION']
        kpi_dict.update({'build_id': build_id, 'sw_version': sw_version})

        if not patch:
            # PATCH from ProjVar is a sequence; join to a single string.
            patch = ProjVar.get_var('PATCH')
            if patch:
                patch = ' '.join(patch)
                kpi_dict.update({'patch': patch})
        else:
            kpi_dict.update({'patch': patch})

        load_average = get_load_average(ssh_client=con_ssh, uptime=uptime)
        kpi_dict.update({'load_average': load_average})

        if not unit:
            unit = 'Time(s)'
        kpi_dict.update({'unit': unit})

        if host:
            kpi_dict['host'] = host
        if log_path:
            kpi_dict['log_path'] = log_path

        if kpi_val is not None:
            # Value supplied by caller: only need a timestamp.
            time_stamp = common.get_date_in_format(
                ssh_client=con_ssh, date_format=KPI_DATE_FORMAT)
        else:
            if start_pattern:
                # Duration KPI: delta between start and end log patterns.
                kpi_val, time_stamp, count = get_duration(
                    start_pattern=start_pattern, start_path=start_path,
                    end_pattern=end_pattern, log_path=log_path, host=host,
                    sudo=sudo, topdown=topdown,
                    extended_regex=extended_regex,
                    average_for_all=average_for_all, init_time=init_time,
                    start_host=start_host,
                    start_pattern_init=start_pattern_init,
                    con_ssh=con_ssh)
            else:
                # Direct-value KPI: extract the value from the log line.
                kpi_val, time_stamp, count = get_match(
                    pattern=end_pattern, log_path=log_path, host=host,
                    extended_regex=extended_regex,
                    python_pattern=python_pattern,
                    average_for_all=average_for_all, sudo=sudo,
                    topdown=topdown, init_time=init_time, con_ssh=con_ssh)

        kpi_dict.update({'timestamp': time_stamp, 'value': kpi_val})

        append_to_kpi_file(local_kpi_file=local_kpi_file,
                           kpi_name=kpi_name, kpi_dict=kpi_dict)
        return 0, kpi_val

    except Exception as e:
        if not fail_ok:
            raise
        # Best-effort mode: report the failure without failing the test.
        print("Failed to record kpi. Error: {}".format(e.__str__()))
        import traceback
        import sys
        traceback.print_exc(file=sys.stdout)
        return 1, e.__str__()
def patch_orchestration_setup():
    """Prepare everything needed for a patch-orchestration test run.

    Checks system health, connects to the patch build server, downloads
    the patch files for the lab, and gathers apply-strategy options.

    Returns (dict): setup info (lab, output_dir, build_server, patch_dir,
        patches, apply strategies, instance_action, alarm_restrictions).

    Skips the test when no patch files are found in the patch dir.
    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    patching_helper.check_system_health()

    lab = InstallVars.get_install_var('LAB')
    bld_server = get_build_server_info(
        PatchingVars.get_patching_var('PATCH_BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')

    LOG.info("Using patch directory path: {}".format(patch_dir))
    # Build an ssh connection to the patch build server.
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    # Switch to bash and re-sync the expected prompt before use.
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # Download patch files from specified patch dir
    LOG.info("Downloading patch files from patch dir {}".format(patch_dir))
    rc = bld_server_obj.ssh_conn.exec_cmd("test -d " + patch_dir)[0]
    assert rc == 0, "Patch directory path {} not found".format(patch_dir)
    clear_patch_dest_dir()
    patches = download_patches(lab, bld_server_obj, patch_dir)
    if len(patches) == 0:
        pytest.skip("No patch files found in {}:{}.".format(
            bld_server_obj.name, patch_dir))

    # Gather orchestration strategy options.
    controller_apply_strategy = PatchingVars.get_patching_var(
        'CONTROLLER_APPLY_TYPE')
    storage_apply_strategy = PatchingVars.get_patching_var(
        'STORAGE_APPLY_TYPE')
    compute_apply_strategy = PatchingVars.get_patching_var(
        'COMPUTE_APPLY_TYPE')
    max_parallel_computes = PatchingVars.get_patching_var(
        'MAX_PARALLEL_COMPUTES')
    instance_action = PatchingVars.get_patching_var('INSTANCE_ACTION')
    alarm_restrictions = PatchingVars.get_patching_var('ALARM_RESTRICTIONS')

    if controller_apply_strategy:
        LOG.info("Controller apply type: {}".format(
            controller_apply_strategy))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(
            max_parallel_computes))
    if instance_action:
        LOG.info("Instance action: {}".format(instance_action))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    _patching_setup = {
        'lab': lab, 'output_dir': output_dir,
        'build_server': bld_server_obj,
        'patch_dir': patch_dir, 'patches': patches,
        'controller_apply_strategy': controller_apply_strategy,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'instance_action': instance_action,
        'alarm_restrictions': alarm_restrictions,
    }

    LOG.info("Patch Orchestration ready to start: {} ".format(
        _patching_setup))
    return _patching_setup
def vms_with_upgrade():
    """
    Fixture for test_vms_with_upgrade: create various vms before upgrade.

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks
        - Create flv_ephemswap with ephemeral AND swap disks
        - Boot following vms and wait for them to be pingable from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_ephemswap
            - Boot vm3 with flavor flv_rootdisk
            - Boot vm4 with flavor flv_rootdisk, and attach a volume to it
            - Boot vm5 from image with flavor flv_ephemswap
        - start upgrade ....Follows upgrade procedure
        - Ping NAT during the upgrade before live migration
        - complete upgrade.

    Teardown:
        - Delete created vms, volumes, flavors

    Returns (list): ids of the five booted vms.
    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    Tenant.set_primary('tenant2')

    LOG.fixture_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor('flv_rootdisk')[1]
    ResourceCleanup.add('flavor', flavor_1)

    LOG.fixture_step("Create another flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1,
                                         swap=512)[1]
    ResourceCleanup.add('flavor', flavor_2)

    LOG.fixture_step(
        "Boot vm1 from volume with flavor flv_rootdisk and wait for it "
        "pingable from NatBox")
    vm1_name = "vol_root"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1,
                            cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm2 from volume with flavor flv_localdisk and wait for it "
        "pingable from NatBox")
    vm2_name = "vol_ephemswap"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2,
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.fixture_step(
        "Boot vm3 from image with flavor flv_rootdisk and wait for it "
        "pingable from NatBox")
    vm3_name = "image_root"
    # NOTE(review): no source='image' is passed here (unlike vm5), so vm3
    # boots with vm_helper.boot_vm's default source despite the step text
    # saying "from image" — confirm whether this is intended.
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1,
                            cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm4 from image with flavor flv_rootdisk, attach a volume "
        "to it and wait for it pingable from NatBox")
    vm4_name = 'image_root_attachvol'
    # NOTE(review): vm4 also relies on boot_vm's default source — see vm3.
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, cleanup='function')[1]

    vol = cinder_helper.create_volume(bootable=False)[1]
    ResourceCleanup.add('volume', vol)
    vm_helper.attach_vol_to_vm(vm4, vol_id=vol)

    LOG.fixture_step(
        "Boot vm5 from image with flavor flv_localdisk and wait for it "
        "pingable from NatBox")
    vm5_name = 'image_ephemswap'
    vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image',
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm4)
    vm_helper.wait_for_vm_pingable_from_natbox(vm5)

    vms = [vm1, vm2, vm3, vm4, vm5]
    return vms
def pytest_itemcollected(item):
    """pytest hook: when the duplex-plus install test is collected,
    record the AIO-plus system type in ProjVar."""
    if "test_duplex_plus_install" not in item.nodeid:
        return
    ProjVar.set_var(sys_type=SysType.AIO_PLUS)
def revert():
    # Finalizer: restore REMOTE_CLI to the value captured by the
    # enclosing scope (free variable `remote_cli`).
    ProjVar.set_var(REMOTE_CLI=remote_cli)
def setup_test_session(global_setup, request):
    """
    Setup primary tenant and Nat Box ssh before the first test gets
    executed. TIS ssh was already set up at collecting phase.
    """
    LOG.fixture_step("(session) Setting up test session...")
    setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT'))

    global con_ssh
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    # set build id to be used to upload/write test results
    setups.set_build_info(con_ssh)

    # Set global vars
    setups.set_session(con_ssh=con_ssh)

    # Ensure tis and natbox (if applicable) ssh are connected
    con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300)

    # set up natbox connection and copy keyfile
    natbox_dict = ProjVar.get_var('NATBOX')
    global natbox_ssh
    # NOTE(review): bare `except:` re-raises after collecting net info,
    # but also catches BaseException (e.g. KeyboardInterrupt) — consider
    # narrowing to `except Exception:`.
    try:
        natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh)
    except:
        # Best-effort system network info collection to aid debugging
        # before re-raising the natbox setup failure.
        if ProjVar.get_var('COLLECT_SYS_NET_INFO'):
            setups.collect_sys_net_info(lab=ProjVar.get_var('LAB'))
        raise

    # Enable keystone debug if
    if ProjVar.get_var('KEYSTONE_DEBUG'):
        setups.enable_disable_keystone_debug(enable=True, con_ssh=con_ssh)

    # collect telnet logs for all hosts
    if ProjVar.get_var('COLLECT_TELNET'):
        end_event = Event()
        threads = setups.collect_telnet_logs_for_nodes(end_event=end_event)
        # Keep the threads and their stop-event so teardown can end them.
        ProjVar.set_var(TELNET_THREADS=(threads, end_event))

    # set global var for sys_type
    setups.set_sys_type(con_ssh=con_ssh)

    # rsync files between controllers
    setups.copy_test_files()

    # set up remote cli clients
    client = con_ssh
    if ProjVar.get_var('REMOTE_CLI'):
        LOG.fixture_step(
            "(session) Install remote cli clients in virtualenv")
        client = setups.setup_remote_cli_client()
        ProjVar.set_var(USER_FILE_DIR=ProjVar.get_var('TEMP_DIR'))

        def remove_remote_cli():
            # Session finalizer: clean temp files, close the client and
            # restore REMOTE_CLI / USER_FILE_DIR defaults.
            LOG.fixture_step("(session) Remove remote cli clients")
            client.exec_cmd(
                'rm -rf {}/*'.format(ProjVar.get_var('TEMP_DIR')))
            client.close()
            from utils.clients.local import RemoteCLIClient
            RemoteCLIClient.remove_remote_cli_clients()
            ProjVar.set_var(REMOTE_CLI=None)
            ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())

        request.addfinalizer(remove_remote_cli)