def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()
    if ProjVar.get_var('IPV6_OAM'):
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(
            lab['short_name']))
        tuxlab2_ip = YOW_TUXLAB2['ip']
        tux_user = TestFileServer.get_user()
        tuxlab_prompt = r'{}@{}\:(.*)\$ '.format(tux_user, YOW_TUXLAB2['name'])
        tuxlab2_ssh = SSHClient(host=tuxlab2_ip, user=tux_user,
                                password=TestFileServer.get_password(),
                                initial_prompt=tuxlab_prompt)
        tuxlab2_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=tuxlab2_ssh, host=lab['floating ip'],
                             user=user, password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
                            password=HostLinuxUser.get_password(),
                            initial_prompt=Prompt.CONTROLLER_PROMPT)

    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
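# Usage sketch (illustrative, not part of the original module): open an SSH
# session to the lab under test and register it as the active controller
# client so later helpers can reuse it via
# ControllerClient.get_active_controller().
#
#   con_ssh = ssh_to_stx(set_client=True)
#   con_ssh.exec_cmd('hostname')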
def keyfile_setup(request):
    """
    Set up the public key file on the lab under
    /home/root/.ssh/authorized_keys

    Args:
        request: pytest arg

    Returns (str):

    """
    # copy the authorized key from the test server to the lab under
    # /home/root/.ssh/authorized_keys
    LOG.fixture_step("copy id_rsa.pub key file from test server to lab")
    source = '/folk/svc-cgcsauto/.ssh/id_rsa.pub'
    destination = HostLinuxUser.get_home()
    common.scp_from_test_server_to_active_controller(source_path=source,
                                                     dest_dir=destination)

    con_ssh = ControllerClient.get_active_controller()
    sysadmin_keyfile = HostLinuxUser.get_home() + '/id_rsa.pub'
    LOG.fixture_step("Logging in as root")
    with con_ssh.login_as_root() as root_ssh:
        LOG.info("Logged in as root")
        root_ssh.exec_cmd('mkdir -p /home/root/.ssh')
        root_ssh.exec_cmd('touch /home/root/.ssh/authorized_keys')
        root_ssh.exec_cmd('cat ' + sysadmin_keyfile +
                          ' >> /home/root/.ssh/authorized_keys')

    def delete_keyfile():
        LOG.fixture_step("cleanup files from the lab as root")
        # clean up id_rsa.pub from the sysadmin home dir and authorized_keys
        # in /home/root/.ssh/
        con_ssh.exec_cmd('rm {}/id_rsa.pub'.format(HostLinuxUser.get_home()))
        con_ssh.exec_sudo_cmd('rm -f /home/root/.ssh/authorized_keys')

    request.addfinalizer(delete_keyfile)
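# Usage sketch (assumes keyfile_setup is registered as a pytest fixture in a
# conftest, which is not shown here): a test lists the fixture as an argument,
# the root authorized_keys entry is installed before the test body runs, and
# the finalizer removes it afterwards. The test name below is hypothetical.
#
#   def test_root_ssh_key(keyfile_setup):
#       con_ssh = ControllerClient.get_active_controller()
#       assert con_ssh.exec_sudo_cmd(
#           'ls /home/root/.ssh/authorized_keys')[0] == 0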
def backup_sensor_data_files(hosts=None, con_ssh=None):
    if hosts is None:
        hosts = system_helper.get_hosts()
    elif isinstance(hosts, str):
        hosts = [hosts]

    LOG.info("Check and ensure sensor data files for {} are copied to "
             "{} if available".format(hosts, HostLinuxUser.get_home()))

    hosts_with_file = []
    con_ssh = ControllerClient.get_active_controller() if not con_ssh else \
        con_ssh
    for host in hosts:
        dest_path = "{}/hwmond_{}_sensor_data".format(
            HostLinuxUser.get_home(), host)
        if con_ssh.file_exists(dest_path):
            hosts_with_file.append(host)
        else:
            source_path = BMCPath.SENSOR_DATA_FILE_PATH.format(
                BMCPath.SENSOR_DATA_DIR, host)
            if con_ssh.file_exists(source_path):
                con_ssh.exec_sudo_cmd('cp {} {}'.format(
                    source_path, dest_path), fail_ok=False)
                hosts_with_file.append(host)

    LOG.info("Sensor data files for {} are copied to {}".format(
        hosts, HostLinuxUser.get_home()))
    return hosts
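# Usage sketch (illustrative): back up sensor data files for a couple of
# specific hosts; the host names below are placeholders, not values from the
# original code.
#
#   backup_sensor_data_files(hosts=['compute-0', 'compute-1'])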
def check_controller_filesystem(con_ssh=None):
    LOG.info("Checking controller root fs size ... ")
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()
    patch_dest_dir1 = HostLinuxUser.get_home() + "patches/"
    patch_dest_dir2 = HostLinuxUser.get_home() + "upgrade_patches/"
    upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                     install_helper.UPGRADE_LOAD_ISO_FILE)
    current_version = system_helper.get_sw_version(use_existing=False)
    cmd = "df | grep /dev/root | awk ' { print $5}'"
    rc, output = con_ssh.exec_cmd(cmd)

    if rc == 0 and output:
        LOG.info("controller root fs size is {} full ".format(output))
        percent = int(output.strip()[:-1])
        if percent > 69:
            con_ssh.exec_cmd("rm {}/*".format(patch_dest_dir1))
            con_ssh.exec_cmd("rm {}/*".format(patch_dest_dir2))
            con_ssh.exec_cmd("rm {}".format(upgrade_load_path))
            with host_helper.ssh_to_host('controller-1') as host_ssh:
                host_ssh.exec_cmd("rm {}/*".format(patch_dest_dir1))
                host_ssh.exec_cmd("rm {}/*".format(patch_dest_dir2))
                host_ssh.exec_cmd("rm {}".format(upgrade_load_path))

            if current_version == '15.12':
                time.sleep(120)
            else:
                entity_id = 'host=controller-0.filesystem=/'
                system_helper.wait_for_alarms_gone(
                    [(EventLogID.FS_THRESHOLD_EXCEEDED, entity_id)],
                    check_interval=10, fail_ok=True, timeout=180)
def restore_sysadmin_password_raw(connect, current_password, original_password,
                                  exclude_list):
    if current_password == original_password:
        LOG.info('Current password is the same as the original password, '
                 'do nothing')
        return

    for n in range(1, MAX_NUM_PASSWORDS_TRACKED + 1):
        new_password = security_helper.gen_linux_password(
            exclude_list=exclude_list, length=PASSWORD_LEGNTH)
        exclude_list.append(new_password)
        LOG.info('changing password {} times: from:{} to:{}\n'.format(
            n, current_password, new_password))
        change_password(connect, current_password, new_password)
        HostLinuxUser.set_password(new_password)
        current_password = new_password

    LOG.info('Restore password of sysadmin to:{}'.format(original_password))
    change_password(connect, current_password, original_password)
    HostLinuxUser.set_password(original_password)
    LOG.info('Password for sysadmin is restored to:{}'.format(
        original_password))
def __get_lab_ssh(labname, log_dir=None):
    """

    Args:
        labname:
        log_dir:

    Returns (SSHClient):

    """
    lab = get_lab_dict(labname)

    # Doesn't have to save logs
    # if log_dir is None:
    #     log_dir = temp_dir = "/tmp/CGCSAUTO/"
    if log_dir is not None:
        ProjVar.set_var(log_dir=log_dir)

    ProjVar.set_var(lab=lab)
    ProjVar.set_var(source_openrc=True)

    con_ssh = SSHClient(lab.get('floating ip'), HostLinuxUser.get_user(),
                        HostLinuxUser.get_password(), CONTROLLER_PROMPT)
    con_ssh.connect()

    # if 'auth_url' in lab:
    #     Tenant._set_url(lab['auth_url'])

    return con_ssh
def prep_test_on_host(target_ssh, target, file_path, active_controller_name,
                      cyclictest_dir=CYCLICTEST_DIR):
    LOG.tc_step(
        "Copy cyclictest executable to target if not already exist: "
        "{}".format(target))
    target_ssh.exec_cmd('mkdir -p {}; rm -f {}/*.*'.format(
        cyclictest_dir, cyclictest_dir))
    dest_path = '{}/{}'.format(cyclictest_dir, os.path.basename(file_path))

    if not target_ssh.file_exists(dest_path):
        LOG.info('Copy CYCLICTEST to selected host {}:{}'.format(
            target, dest_path))
        target_ssh.scp_on_dest(HostLinuxUser.get_user(),
                               active_controller_name,
                               dest_path=dest_path, source_path=file_path,
                               source_pswd=HostLinuxUser.get_password())

        LOG.info('Check if CYCLICTEST was copied to target host')
        assert target_ssh.file_exists(dest_path), \
            'Failed to find CYCLICTEST executable on target host after copied'

        LOG.info('-successfully copied to {}:{}'.format(target, file_path))
def scp_from_local(source_path, dest_ip, dest_path=None,
                   dest_user=None, dest_password=None,
                   timeout=900, is_dir=False):
    """
    Scp file(s) from localhost (i.e., from where the automated tests are
    executed).

    Args:
        source_path (str): source file/directory path
        dest_ip (str): ip of the destination host
        dest_user (str): username of destination host.
        dest_password (str): password of destination host
        dest_path (str): destination directory path to copy the file(s) to
        timeout (int): max time to wait for scp finish in seconds
        is_dir (bool): whether to copy a single file or a directory

    """
    if not dest_path:
        dest_path = HostLinuxUser.get_home()
    if not dest_user:
        dest_user = HostLinuxUser.get_user()
    if not dest_password:
        dest_password = HostLinuxUser.get_password()

    dir_option = '-r ' if is_dir else ''

    cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
          '{}{} {}@{}:{}'.\
        format(dir_option, source_path, dest_user, dest_ip, dest_path)

    _scp_on_local(cmd, remote_password=dest_password, timeout=timeout)
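# Usage sketch (illustrative): push a local heat template to a destination
# host's home directory using the default sysadmin credentials. The file path
# and IP below are placeholders.
#
#   scp_from_local('/tmp/lab_setup.yaml', dest_ip='10.10.10.3', timeout=300)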
def get_current_user_password(cls, con_ssh=None):
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    user = con_ssh.get_current_user()
    if user == HostLinuxUser.get_user():
        return user, HostLinuxUser.get_password()

    return user, cls.__users[user]
def test_linux_user_lockout():
    """
    Verify that the linux user account is locked out after 5 failed login
    attempts

    Test Steps:
        - attempt to login with invalid password as sysadmin 5 times
        - verify cannot login as sysadmin anymore

    Returns:

    """
    LOG.tc_step(
        'Attempt to login with WRONG password as sysadmin {} times'.format(
            MAX_FAILED_LOGINS))

    user = '******'
    if HostLinuxUser.get_user() != user:
        skip('Error: user name from HostLinuxUser.get_user() != sysadmin, '
             'it is:{}'.format(HostLinuxUser.get_user()))

    password = HostLinuxUser.get_password()
    invalid_password = '******'

    host = lab_info.get_lab_floating_ip()

    LOG.info('verify we can login at the beginning')
    connect = log_in_raw(host, user, password, expect_fail=False)
    assert connect, 'Failed to login at the beginning with ' \
                    'password:{}'.format(password)

    for n in range(1, MAX_FAILED_LOGINS + 1):
        message = '{}: Expecting to fail to login with invalid password, ' \
                  'host:{}, user:{}, password:{}\n'.format(
                      n, host, user, invalid_password)
        LOG.info(message)
        connect = log_in_raw(host, user, invalid_password, expect_fail=True)
        assert not connect, 'Expecting to fail but did not. ' + message

    LOG.info('OK, failed {} times to login with invalid password:{} as '
             'user:{} to host:{}\n'.format(MAX_FAILED_LOGINS,
                                           invalid_password, user, host))

    LOG.tc_step('Now attempt to login with CORRECT password:{}, expecting '
                'to fail\n'.format(password))
    connect = log_in_raw(host, user, password, expect_fail=True)
    message = 'host:{}, user:{}, password:{}\n'.format(host, user, password)
    assert not connect, 'Expecting to fail but did not. ' + message

    LOG.info('OK, failed to login with the CORRECT password because the '
             'user account was locked out\n')

    LOG.tc_step('Wait for 5 minutes + 20 seconds for the account to be '
                'automatically unlocked\n')
    time.sleep(320)

    LOG.info('verify we can login again after waiting for 5 minutes')
    connect = log_in_raw(host, user, password, expect_fail=False)
    assert connect, 'Failed to login again after waiting for 5 minutes. ' + \
        message
def setup_tis_ssh(lab):
    con_ssh = ControllerClient.get_active_controller(fail_ok=True)

    if con_ssh is None:
        con_ssh = SSHClient(lab['floating ip'], HostLinuxUser.get_user(),
                            HostLinuxUser.get_password(), CONTROLLER_PROMPT)
        con_ssh.connect(retry=True, retry_timeout=30)
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
def test_enable_tpm(swact_first):
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(con_ssh)

    if code == 0:
        LOG.info('TPM already configured on the lab, cert_id:{}, '
                 'cert_type:{}'.format(cert_id, cert_type))

        LOG.tc_step('disable TPM first in order to test enabling TPM')
        code, output = remove_cert_from_tpm(con_ssh, fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'
        time.sleep(30)

        LOG.info('Waiting alarm: out-of-config cleaned up')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    else:
        LOG.info('TPM is NOT configured on the lab')
        LOG.info('-code:{}, cert_id:{}, cert_type:{}'.format(
            code, cert_id, cert_type))

    if swact_first:
        LOG.tc_step('Swact the active controller as instructed')
        if len(system_helper.get_controllers()) < 2:
            LOG.info('Less than 2 controllers, skip swact')
        else:
            host_helper.swact_host(fail_ok=False)
            copy_config_from_local(
                con_ssh, local_conf_backup_dir,
                os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

    LOG.tc_step('Install HTTPS Certificate into TPM')
    code, output = store_cert_into_tpm(
        con_ssh, check_first=False, fail_ok=False,
        pem_password=HostLinuxUser.get_password())
    assert 0 == code, 'Failed to install certificate into TPM, cert-file'
    LOG.info('OK, certificate is installed into TPM')

    LOG.info('Wait for the out-of-config alarm to clear')
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    LOG.tc_step('Verify the configuration changes for impacted components, '
                'expecting all changes to exist')
    verify_configuration_changes(expected=True, connection=con_ssh)
def ssh_to_stx(lab=None, set_client=False):
    if not lab:
        lab = ProjVar.get_var('LAB')

    con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
                        password=HostLinuxUser.get_password(),
                        initial_prompt=Prompt.CONTROLLER_PROMPT)
    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
def test_configure_external_ceph(ceph_lab, ceph_services):
    """
    This test configures external ceph on a system. Currently this is only
    supported on wcp3-6, using wcp7-12 as the external ceph system.

    In order to support this, wcp7-12 must be installed in Region mode, and
    wcp3-6 must be installed with a custom config that includes an
    infrastructure interface. The ceph.conf file then needs to be copied
    from wcp7-12 onto wcp3-6, renamed to something other than ceph.conf, and
    used to enable external ceph.

    Only the following services can be enabled on external ceph: cinder,
    nova and glance. Swift is not supported. Once external ceph is enabled,
    a new cinder type is added and resource creation defaults to the
    external ceph backend (depending on what services were provisioned in
    the first place).

    Test Steps:
        1. Copy ceph.conf from wcp7-12
        2. Provision external ceph services on wcp3-6

    TODO:
        - Add an infra ping test from regular lab to ceph lab - skip if fails

    """
    LOG.tc_step("Retrieve ceph.conf from the external ceph system")
    con_ssh = ControllerClient.get_active_controller()
    ceph_lab = get_lab_dict(ceph_lab)
    source_server = ceph_lab['floating ip']
    source_lab_name = ceph_lab['short_name']
    source_filepath = "/etc/ceph/ceph.conf"
    dest_filepath = "/home/sysadmin/ceph_{}.conf".format(source_lab_name)
    con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                        source_ip=source_server,
                        source_pswd=HostLinuxUser.get_password(),
                        source_path=source_filepath,
                        dest_path=dest_filepath, timeout=60)

    LOG.tc_step(
        "Confirm ceph.conf file was successfully copied before proceeding")
    if not con_ssh.file_exists(dest_filepath):
        skip("External ceph.conf not present on the system")

    LOG.tc_step("Provision storage-backend for external ceph")
    add_external_ceph(dest_filepath, ceph_services.split(sep='_'))
def __init__(self, host, prompt=None, port=0, timeout=30, hostname=None,
             user=HostLinuxUser.get_user(),
             password=HostLinuxUser.get_password(), negotiate=False,
             vt100query=False, console_log_file=None):

    self.logger = LOG
    super(TelnetClient, self).__init__(host=host, port=port, timeout=timeout)

    if not hostname:
        self.send('\r\n\r\n')
        prompts = [LOGIN_REGEX, LOGGED_IN_REGEX]
        index, re_obj, matched_text = super().expect(prompts, timeout=10)
        if index in (0, 1):
            hostname = prompts[index].search(matched_text).group(1).decode(
                errors='ignore')

    if not prompt:
        prompt = r':~\$ '
    # -- mod begins
    self.console_log_file = self.get_log_file(console_log_file)
    self.negotiate = negotiate
    self.vt100query = vt100query
    if self.vt100query:
        self.vt100querybuffer = b''  # Buffer for VT100 queries
    # -- mod ends

    self.flush(timeout=1)
    self.logger = telnet_logger(hostname) if hostname else telnet_logger(
        host + ":" + str(port))
    self.hostname = hostname
    self.prompt = prompt
    self.cmd_output = ''
    self.cmd_sent = ''
    self.timeout = timeout
    self.user = user
    self.password = password

    self.logger.info(
        'Telnet connection to {}:{} ({}) is established'.format(
            host, port, hostname))
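# Usage sketch (illustrative): construct a TelnetClient against a console
# port and run a command, mirroring the calls made in
# _test_telnet_ldap_admin_access further below. The IP and port are
# placeholders; real values come from the node objects built by
# node.create_node_dict().
#
#   telnet = TelnetClient('128.224.150.83', port=2301,
#                         hostname='controller-0')
#   telnet.login()
#   telnet.exec_cmd('whoami')
#   telnet.close()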
def download_patches(lab, server, patch_dir, conn_ssh=None):
    """

    Args:
        lab:
        server:
        patch_dir:
        conn_ssh:

    Returns:

    """
    patches = {}

    rc, output = server.ssh_conn.exec_cmd(
        "ls -1 --color=none {}/*.patch".format(patch_dir))
    assert rc == 0, "Failed to list patch files in directory path {}.".format(
        patch_dir)

    if output is not None:
        patch_dest_dir = HostLinuxUser.get_home() + "patches/"
        active_controller = system_helper.get_active_controller_name()
        dest_server = lab[active_controller + ' ip']
        ssh_port = None
        pre_opts = 'sshpass -p "{0}"'.format(HostLinuxUser.get_password())

        server.ssh_conn.rsync(patch_dir + "/*.patch", dest_server,
                              patch_dest_dir, ssh_port=ssh_port,
                              pre_opts=pre_opts)

        if conn_ssh is None:
            conn_ssh = ControllerClient.get_active_controller()

        rc, output = conn_ssh.exec_cmd(
            "ls -1 {}/*.patch".format(patch_dest_dir))
        assert rc == 0, "Failed to list downloaded patch files in directory " \
                        "path {}.".format(patch_dest_dir)

        if output is not None:
            for item in output.splitlines():
                patches[os.path.splitext(os.path.basename(item))[0]] = item

            patch_ids = " ".join(patches.keys())
            LOG.info("List of patches:\n {}".format(patch_ids))

    return patches
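# Usage sketch (illustrative): 'build_server' is assumed to be a server
# object exposing an ssh_conn attribute (as used inside download_patches);
# the patch directory below is a placeholder path.
#
#   patches = download_patches(lab=ProjVar.get_var('LAB'),
#                              server=build_server,
#                              patch_dir='/localdisk/designer/patches')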
def scp_from_active_controller_to_localhost(
        source_path, dest_path='',
        src_user=None,
        src_password=None,
        timeout=900, is_dir=False):

    active_cont_ip = ControllerClient.get_active_controller().host

    if not src_user:
        src_user = HostLinuxUser.get_user()
    if not src_password:
        src_password = HostLinuxUser.get_password()

    return scp_to_local(source_path=source_path, source_ip=active_cont_ip,
                        source_user=src_user, source_password=src_password,
                        dest_path=dest_path, timeout=timeout, is_dir=is_dir)
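# Usage sketch (illustrative): pull a file from the active controller's home
# directory into a local path; the file names below are placeholders.
#
#   scp_from_active_controller_to_localhost(
#       source_path='/home/sysadmin/ca-cert.pem',
#       dest_path='/tmp/ca-cert.pem', timeout=120)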
def test_patch_log_upload_dir(get_patch_name):
    """
    Checks that the correct logs are added when uploading a directory of
    patches

    Test Steps:
        - Upload patches from a directory
        - Check the log files for the expected logs

    """
    patch_name = get_patch_name
    con_ssh = ControllerClient.get_active_controller()
    LOG.tc_step("Uploading patches from directory")
    code = con_ssh.exec_sudo_cmd('sw-patch upload-dir test_patches')[0]
    if code != 0:
        skip("No patches found. Cannot test.")
    res_1 = check_dir(patch_name)

    search_for = ['sw-patch-controller-daemon.*INFO: Importing patches:'
                  '.*{}'.format(patch_name),
                  'sw-patch-controller-daemon.*INFO: Importing patch:'
                  '.*{}'.format(patch_name)]
    res_2 = check_logs(search_for, lines=20, api=False)

    user = HostLinuxUser.get_user()
    search_for = ['sw-patch-controller-daemon.*INFO: User: {}/admin Action: '
                  'Importing patches:.*{}.patch'.format(user, patch_name),
                  'sw-patch-controller-daemon.*INFO: User: {}/admin Action: '
                  'Importing patch:.*{}.patch'.format(user, patch_name)]
    res_3 = check_logs(search_for, lines=10, api=True)

    LOG.tc_step("Deleting patch {}".format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch delete {}'.format(patch_name))

    assert res_1, "FAIL: The patch was not in \"sw-patch query\""
    assert res_2, "FAIL: uploading patches did not generate the expected " \
                  "logs in patching.log"
    assert res_3, "FAIL: uploading patches did not generate the expected " \
                  "logs in patching-api.log"
def _get_large_heat(con_ssh=None):
    """
    copy the heat templates to TiS server.

    Args:
        con_ssh (SSHClient):

    Returns (str): TiS file path of the heat template

    """
    file_dir = StxPath.CUSTOM_HEAT_TEMPLATES
    file_name = HeatTemplate.LARGE_HEAT
    file_path = file_dir + file_name
    source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name

    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    LOG.info('Check if file already exists on TiS')
    if con_ssh.file_exists(file_path=file_path):
        LOG.info('dest path {} already exists. Return existing '
                 'path'.format(file_path))
        return file_path

    with host_helper.ssh_to_test_server() as ssh_to_server:
        ssh_to_server.rsync(source_file, html_helper.get_ip_addr(), file_dir,
                            dest_user=HostLinuxUser.get_user(),
                            dest_password=HostLinuxUser.get_password(),
                            timeout=1200)
    return file_path
def app_upload_apply(con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Upload stx-monitor
    Apply stx-monitor

    """
    # Do application upload stx-monitor.
    app_dir = HostLinuxUser.get_home()
    tar_file = os.path.join(app_dir, STX_MONITOR_TAR)
    LOG.info("Upload %s" % tar_file)
    container_helper.upload_app(
        tar_file=tar_file,
        app_name=STX_MONITOR_APP_NAME,
        con_ssh=con_ssh,
        auth_info=auth_info,
        uploaded_timeout=3600,
    )

    # Do application apply stx-monitor.
    LOG.info("Apply %s" % STX_MONITOR_APP_NAME)
    container_helper.apply_app(app_name=STX_MONITOR_APP_NAME,
                               applied_timeout=3600,
                               check_interval=60,
                               con_ssh=con_ssh,
                               auth_info=auth_info)
def cleaup():
    ssh_client.exec_sudo_cmd('rm -rf ' + working_ssl_file)
    backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
    ssh_client.exec_sudo_cmd('rm -rf ' + backup_dir)
    LOG.info('remove saved configuration files on local')
    if os.path.exists(local_conf_backup_dir):
        shutil.rmtree(local_conf_backup_dir)
def backup_configuration_files():
    backup_dir = os.path.join(HostLinuxUser.get_home(), conf_backup_dir)
    ssh_client = ControllerClient.get_active_controller()
    LOG.info('Save current configuration files')
    ssh_client.exec_sudo_cmd('rm -f ' + backup_dir + '; mkdir -p ' +
                             backup_dir)

    for service, file_info in file_changes.items():
        for conf_file in file_info:
            ssh_client.exec_sudo_cmd('cp -f ' + conf_file + ' ' + backup_dir)

    # if os.path.exists(local_conf_backup_dir):
    #     os.rmdir(local_conf_backup_dir)
    if os.path.exists(local_conf_backup_dir):
        shutil.rmtree(local_conf_backup_dir)

    source_ip = system_helper.get_oam_values()['oam_floating_ip']
    common.scp_to_local(backup_dir, source_ip=source_ip,
                        dest_path=local_conf_backup_dir, is_dir=True)
def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None):
    """
    fetch cert file from build server. scp to TiS.

    Args:
        cert_file (str): valid values: ca-cert, server-with-key
        scp_to_local (bool): Whether to scp cert file to localhost as well.
        con_ssh (SSHClient): active controller ssh client

    Returns (str|None):
        cert file path on localhost if scp_to_local=True, else cert file
        path on TiS system. If no certificate found, return None.

    """
    if not cert_file:
        cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home())

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    if not con_ssh.file_exists(cert_file):
        raise FileNotFoundError(
            '{} not found on active controller'.format(cert_file))

    if scp_to_local:
        cert_name = os.path.basename(cert_file)
        dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name)
        common.scp_from_active_controller_to_localhost(
            source_path=cert_file, dest_path=dest_path, timeout=120)
        cert_file = dest_path
        LOG.info("Cert file copied to {} on localhost".format(dest_path))

    return cert_file
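# Usage sketch (illustrative): fetch the default ca-cert.pem from the active
# controller and copy it into the local TEMP_DIR for use by local clients.
#
#   local_cert = fetch_cert_file()
#   LOG.info('cert available locally at {}'.format(local_cert))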
def delete_test_users():
    global _host_users

    restore_sysadmin_password(target_password=TARGET_PASSWORD)

    LOG.info('Deleting users created for testing\n')
    conn_to_ac = ControllerClient.get_active_controller()
    count = 0
    for (host, user), _ in _host_users.items():
        if user == 'sysadmin' or user == HostLinuxUser.get_user():
            LOG.info('-do not delete user:{} on host:{}\n'.format(
                user, host))
            continue

        LOG.info('-deleting user:{} on host:{}\n'.format(user, host))
        count += 1
        if host == 'active-controller':
            conn_to_ac.exec_sudo_cmd('userdel -r {}'.format(user))
        else:
            # sleep a bit so controller-1 has the same password as
            # controller-0
            time.sleep(30)
            with host_helper.ssh_to_host(host, password='******') as conn:
                LOG.info('TODO: delete user:{} on host:{} by CLI: '
                         'userdel -r {}\n'.format(user, host, user))
                conn.exec_sudo_cmd("userdel -r '{}'".format(user))

    LOG.info('{} test users deleted'.format(count))
def launch_lab_setup_tenants_vms():
    home_dir = HostLinuxUser.get_home()
    stack1 = "{}/lab_setup-tenant1-resources.yaml".format(home_dir)
    stack1_name = "lab_setup-tenant1-resources"
    stack2 = "{}/lab_setup-tenant2-resources.yaml".format(home_dir)
    stack2_name = "lab_setup-tenant2-resources"
    script_name = "{}/create_resource_stacks.sh".format(home_dir)

    con_ssh = ControllerClient.get_active_controller()
    if con_ssh.file_exists(file_path=script_name):
        cmd1 = 'chmod 755 ' + script_name
        con_ssh.exec_cmd(cmd1)
        con_ssh.exec_cmd(script_name, fail_ok=False)

    stack_id_t1 = heat_helper.get_stacks(name=stack1_name,
                                         auth_info=Tenant.get('tenant1'))
    # may be better to delete all tenant stacks if any
    if not stack_id_t1:
        heat_helper.create_stack(stack_name=stack1_name, template=stack1,
                                 auth_info=Tenant.get('tenant1'),
                                 timeout=1000, cleanup=None)

    stack_id_t2 = heat_helper.get_stacks(name=stack2_name,
                                         auth_info=Tenant.get('tenant2'))
    if not stack_id_t2:
        heat_helper.create_stack(stack_name=stack2_name, template=stack2,
                                 auth_info=Tenant.get('tenant2'),
                                 timeout=1000, cleanup=None)

    LOG.info("Checking all VMs are in active state")
    vms = get_all_vms()
    vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
def install_upgrade_license(license_path, timeout=30, con_ssh=None):
    """
    Installs the upgrade license on controller-0

    Args:
        con_ssh (SSHClient): SSH connection to controller-0
        license_path (str): license full path on controller-0
        timeout (int):

    Returns (int): 0 - success; 1 - failure

    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "sudo license-install " + license_path
    con_ssh.send(cmd)
    end_time = time.time() + timeout
    rc = 1
    while time.time() < end_time:
        index = con_ssh.expect(
            [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.Y_N_PROMPT],
            timeout=timeout)
        if index == 2:
            con_ssh.send('y')

        if index == 1:
            con_ssh.send(HostLinuxUser.get_password())

        if index == 0:
            rc = con_ssh.exec_cmd("echo $?")[0]
            con_ssh.flush()
            break

    return rc
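# Usage sketch (illustrative): install an upgrade license that was
# previously copied to the controller home directory; the file name below
# is a placeholder.
#
#   license_file = HostLinuxUser.get_home() + 'license.lic'
#   assert install_upgrade_license(license_file) == 0, \
#       'license-install failed'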
def test_patch_log_what_requires(get_patch_name):
    """
    Checks that the what_requires query is logged

    Test Steps:
        - Upload a patch and execute 'sw-patch what-requires'
        - Check log files for the expected logs

    """
    patch_name = get_patch_name
    con_ssh = ControllerClient.get_active_controller()
    LOG.tc_step("Uploading patch {}".format(patch_name))
    con_ssh.exec_sudo_cmd(
        'sw-patch upload test_patches/{}.patch'.format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch what-requires {}'.format(patch_name))

    user = HostLinuxUser.get_user()
    search_for = ['sw-patch-controller-daemon.*INFO: Querying what requires '
                  'patches:.*{}'.format(patch_name)]
    res_1 = check_logs(search_for, lines=10, api=False)

    search_for = ['sw-patch-controller-daemon.*INFO: User: {}/admin Action: '
                  'Querying what requires patches:.*{}'.format(user,
                                                               patch_name)]
    res_2 = check_logs(search_for, lines=10, api=True)

    LOG.tc_step("Deleting patch {}".format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch delete {}'.format(patch_name))

    assert res_1, "FAIL: uploading patches did not generate the expected " \
                  "logs in patching.log"
    assert res_2, "FAIL: uploading patches did not generate the expected " \
                  "logs in patching-api.log"
def _test_telnet_ldap_admin_access(user_name):
    """
    Args:
        user_name: username of the ldap user; should be admin for this test

    Test Steps:
        - telnet to active controller
        - login as admin password admin.
        - verify that it can ls /home/sysadmin

    Teardowns:
        - Disconnect telnet

    """
    if ProjVar.get_var('COLLECT_TELNET'):
        skip('Telnet is in use for log collection. This test, which '
             'requires telnet, will be skipped')

    lab = ProjVar.get_var('LAB')
    nodes_info = node.create_node_dict(lab['controller_nodes'], 'controller')
    hostname = system_helper.get_active_controller_name()
    controller_node = nodes_info[hostname]
    password = "******"
    new_password = "******"

    telnet = TelnetClient(controller_node.telnet_ip,
                          port=controller_node.telnet_port,
                          hostname=hostname, user=user_name,
                          password=new_password, timeout=10)
    try:
        LOG.tc_step("Telnet to lab as {} user with password {}".format(
            user_name, password))
        telnet.login(expect_prompt_timeout=30, handle_init_login=True)

        code, output = telnet.exec_cmd(
            'ls {}'.format(HostLinuxUser.get_home()), fail_ok=False)
        LOG.info('output from test {}'.format(output))
        assert '*** forbidden' not in output, \
            'not able to ls to {} as admin user'.format(
                HostLinuxUser.get_home())
    finally:
        telnet.send('exit')
        telnet.close()
def stx_monitor_file_exist():
    con_ssh = ControllerClient.get_active_controller()
    home_dir = HostLinuxUser.get_home()
    stx_mon_file = '{}/{}'.format(home_dir, STX_MONITOR_TAR)

    LOG.info("Check if file %s is present" % stx_mon_file)

    return con_ssh.file_exists(stx_mon_file)
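# Usage sketch (illustrative): only upload/apply stx-monitor when its tar
# file is already present in the sysadmin home directory.
#
#   if stx_monitor_file_exist():
#       app_upload_apply()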
def remove_remote_cli():
    LOG.fixture_step("(session) Remove remote cli clients")
    client.exec_cmd('rm -rf {}/*'.format(ProjVar.get_var('TEMP_DIR')))
    client.close()
    from utils.clients.local import RemoteCLIClient
    RemoteCLIClient.remove_remote_cli_clients()
    ProjVar.set_var(REMOTE_CLI=None)
    ProjVar.set_var(USER_FILE_DIR=HostLinuxUser.get_home())