def ssh_to_stx(lab=None, set_client=False):
    """Open an SSH session to the STX system's floating IP.

    When the IPV6_OAM project var is set, the session is tunnelled through
    the tuxlab2 jump host (SSHFromSSH), since the IPv6 OAM network is not
    directly reachable from the test runner.

    Args:
        lab (dict|None): lab dict; defaults to ProjVar 'LAB'
        set_client (bool): when True, register the new session as the
            active controller client

    Returns (SSHClient): the connected session
    """
    if not lab:
        lab = ProjVar.get_var('LAB')

    user = HostLinuxUser.get_user()
    password = HostLinuxUser.get_password()
    if ProjVar.get_var('IPV6_OAM'):
        # IPv6 lab: first ssh to the tuxlab2 jump host, then hop to the lab
        lab = convert_to_ipv6(lab)
        LOG.info("SSH to IPv6 system {} via tuxlab2".format(
            lab['short_name']))
        tuxlab2_ip = YOW_TUXLAB2['ip']
        tux_user = TestFileServer.get_user()
        tuxlab_prompt = r'{}@{}\:(.*)\$ '.format(tux_user, YOW_TUXLAB2['name'])
        tuxlab2_ssh = SSHClient(host=tuxlab2_ip, user=tux_user,
                                password=TestFileServer.get_password(),
                                initial_prompt=tuxlab_prompt)
        # Jump host may be slow/busy: retry for up to 5 minutes
        tuxlab2_ssh.connect(retry_timeout=300, retry_interval=30, timeout=60)
        con_ssh = SSHFromSSH(ssh_client=tuxlab2_ssh, host=lab['floating ip'],
                             user=user, password=password,
                             initial_prompt=Prompt.CONTROLLER_PROMPT)
    else:
        # IPv4 lab: direct connection to the floating IP
        con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
                            password=HostLinuxUser.get_password(),
                            initial_prompt=Prompt.CONTROLLER_PROMPT)
    con_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(con_ssh)

    return con_ssh
def scp_from_local(source_path, dest_ip, dest_path=None, dest_user=None,
                   dest_password=None, timeout=900, is_dir=False):
    """Copy file(s) from localhost (where the automated tests run) to a
    remote host via scp.

    Args:
        source_path (str): source file/directory path on localhost
        dest_ip (str): ip of the destination host
        dest_path (str): destination path; defaults to the host user's home
        dest_user (str): destination username; defaults to HostLinuxUser
        dest_password (str): destination password; defaults to HostLinuxUser
        timeout (int): max seconds to wait for scp to finish
        is_dir (bool): copy a directory (recursive) instead of a single file
    """
    dest_path = dest_path or HostLinuxUser.get_home()
    dest_user = dest_user or HostLinuxUser.get_user()
    dest_password = dest_password or HostLinuxUser.get_password()

    recurse = '-r ' if is_dir else ''
    cmd = ('scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '
           '{}{} {}@{}:{}'.format(recurse, source_path, dest_user, dest_ip,
                                  dest_path))
    _scp_on_local(cmd, remote_password=dest_password, timeout=timeout)
def _get_large_heat(con_ssh=None):
    """Ensure the large heat template is present on the TiS server.

    If it is missing, rsync it over from the test file server.

    Args:
        con_ssh (SSHClient): controller connection; active controller is
            used when None

    Returns (str): TiS file path of the heat template
    """
    template_name = HeatTemplate.LARGE_HEAT
    dest_dir = StxPath.CUSTOM_HEAT_TEMPLATES
    dest_path = dest_dir + template_name
    src_path = TestServerPath.CUSTOM_HEAT_TEMPLATES + template_name

    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    LOG.info('Check if file already exists on TiS')
    if con_ssh.file_exists(file_path=dest_path):
        LOG.info('dest path {} already exists. Return existing path'.format(
            dest_path))
        return dest_path

    # Not on the system yet: pull it from the test file server
    with host_helper.ssh_to_test_server() as server_ssh:
        server_ssh.rsync(src_path, html_helper.get_ip_addr(), dest_dir,
                         dest_user=HostLinuxUser.get_user(),
                         dest_password=HostLinuxUser.get_password(),
                         timeout=1200)
    return dest_path
def __get_lab_ssh(labname, log_dir=None):
    """Resolve *labname* to a lab dict, prime the project vars, and return
    a connected SSH session to the lab's floating IP.

    Args:
        labname: lab name understood by get_lab_dict()
        log_dir: optional log directory; stored in ProjVar when given

    Returns (SSHClient): connected session
    """
    lab = get_lab_dict(labname)

    # Saving logs is optional for this helper
    if log_dir is not None:
        ProjVar.set_var(log_dir=log_dir)
    ProjVar.set_var(lab=lab)
    ProjVar.set_var(source_openrc=True)

    session = SSHClient(lab.get('floating ip'),
                        HostLinuxUser.get_user(),
                        HostLinuxUser.get_password(),
                        CONTROLLER_PROMPT)
    session.connect()
    return session
def prep_test_on_host(target_ssh, target, file_path, active_controller_name,
                      cyclictest_dir=CYCLICTEST_DIR):
    """Prepare a host for cyclictest: recreate the working directory and
    copy the cyclictest executable over if it is not already there.

    Args:
        target_ssh: SSH session to the target host
        target: target host name (for logging)
        file_path: path of the cyclictest executable on the controller
        active_controller_name: host to scp the executable from
        cyclictest_dir: working directory on the target
    """
    LOG.tc_step(
        "Copy cyclictest executable to target if not already exist: {}".format(
            target))

    # Fresh working directory on the target
    target_ssh.exec_cmd('mkdir -p {}; rm -f {}/*.*'.format(
        cyclictest_dir, cyclictest_dir))
    dest_path = '{}/{}'.format(cyclictest_dir, os.path.basename(file_path))

    if not target_ssh.file_exists(dest_path):
        LOG.info('Copy CYCLICTEST to selected host {}:{}'.format(
            target, dest_path))
        target_ssh.scp_on_dest(HostLinuxUser.get_user(),
                               active_controller_name,
                               dest_path=dest_path,
                               source_path=file_path,
                               source_pswd=HostLinuxUser.get_password())

    # Verify the copy regardless of whether it happened just now
    LOG.info('Check if CYCLICTEST was copied to target host')
    assert target_ssh.file_exists(dest_path), \
        'Failed to find CYCLICTEST executable on target host after copied'

    LOG.info('-successfully copied to {}:{}'.format(target, file_path))
def install_upgrade_license(license_path, timeout=30, con_ssh=None):
    """
    Installs upgrade license on controller-0.

    Args:
        license_path (str): license full path on controller-0
        timeout (int): max seconds to wait for each prompt, and overall
            deadline for the install to finish
        con_ssh (SSHClient): SSH connection to controller-0; the active
            controller client is used when None

    Returns (int): 0 - success; 1 - failure (including overall timeout)
    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()

    cmd = "sudo license-install " + license_path
    con_ssh.send(cmd)
    end_time = time.time() + timeout
    rc = 1
    while time.time() < end_time:
        index = con_ssh.expect(
            [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.Y_N_PROMPT],
            timeout=timeout)
        # expect() returns exactly one index, so an exclusive chain is the
        # right shape here (the original re-tested each index separately).
        if index == 2:
            # Confirm the install at the yes/no prompt
            con_ssh.send('y')
        elif index == 1:
            # Supply the sudo password
            con_ssh.send(HostLinuxUser.get_password())
        else:
            # Back at the shell prompt: capture the command's exit status
            rc = con_ssh.exec_cmd("echo $?")[0]
            con_ssh.flush()
            break

    return rc
def get_current_user_password(cls, con_ssh=None):
    """Return (username, password) for the user currently logged in on the
    given connection (active controller when con_ssh is None)."""
    ssh = con_ssh or ControllerClient.get_active_controller()
    current = ssh.get_current_user()
    if current == HostLinuxUser.get_user():
        return current, HostLinuxUser.get_password()
    # Fall back to the class-tracked user/password registry
    return current, cls.__users[current]
def test_linux_user_lockout():
    """
    Verify linux user account will be locked out after MAX_FAILED_LOGINS
    failed attempts, and automatically unlocked after ~5 minutes.

    Test Steps:
        - attempt to login with invalid password as sysadmin 5 times
        - verify cannot login as sysadmin anymore (even with the correct
          password)
        - wait 5 minutes + 20 seconds, verify login works again
    """
    LOG.tc_step(
        'Attempt to login with WRONG password as sysadmin {} times'.format(
            MAX_FAILED_LOGINS))
    # NOTE(review): the credential literals below are redacted ('******')
    # in this copy of the source.
    user = '******'
    if HostLinuxUser.get_user() != user:
        skip(
            'Error: user name from HostLinuxCreds.get_user() != sysadmin, it is:{}'
            .format(HostLinuxUser.get_user()))

    password = HostLinuxUser.get_password()
    invalid_password = '******'
    host = lab_info.get_lab_floating_ip()

    LOG.info('verify we can login in at beginning')
    connect = log_in_raw(host, user, password, expect_fail=False)
    # BUG FIX: the original message had no '{}' placeholder, so
    # .format(password) was a no-op and the password never appeared in the
    # failure message.
    assert connect, 'Failed to login in at beginning with password:{}'.format(
        password)

    for n in range(1, MAX_FAILED_LOGINS + 1):
        message = '{}: Expecting to fail to login with invalid password, host:{}, user:{}, password:{}\n'.format(
            n, host, user, invalid_password)
        LOG.info(message)
        connect = log_in_raw(host, user, invalid_password, expect_fail=True)
        assert not connect, 'Expecting to fail but not.' + message

    LOG.info(
        'OK, failed {} times to login with invalid password:{} as user:{} to host:{}\n'
        .format(MAX_FAILED_LOGINS, invalid_password, user, host))

    LOG.tc_step(
        'Now attempt to login with CORRECT password:{}, expecting to fail\n'.
        format(password))
    connect = log_in_raw(host, user, password, expect_fail=True)
    message = 'host:{}, user:{}, password:{}\n'.format(host, user, password)
    assert not connect, 'Expecting to fail but not.' + message
    LOG.info(
        'OK, failed to login with CORRECT password due to the user account was locked down\n'
    )

    LOG.tc_step(
        'Wait for 5 minutes + 20 seconds for the account been automatically unlocked\n'
    )
    time.sleep(320)

    LOG.info('verify we can login again after waiting for 5 minutes')
    connect = log_in_raw(host, user, password, expect_fail=False)
    assert connect, 'Failed to login again after waiting for 5 minutes.' + message
def restore_platform():
    """
    Test ansible restore_platform on controller-0

    Test Steps:
        - Prepare restore environment
        - ssh to given machine
        - collect logs
        - copy backup.tgz from test server to machine
        - collect logs
        - ansible-playbook restore_platform.yml
    """
    prepare_restore_env()

    # Ssh to machine that will become controller-0,
    c0_ip = get_ipv4_controller_0()
    prompt = r'.*\:~\$'
    # NOTE(review): user/password literals are redacted ('******') in this
    # copy of the source; restore real credentials before running.
    con_ssh = SSHClient(host=c0_ip, user='******', password='******',
                        initial_prompt=prompt)
    con_ssh.connect()

    # Test step 1
    backup_dest_path = STORE_BACKUP_PATH
    LOG.tc_step(
        "Copy from test server {} to controller-0".format(backup_dest_path))
    common.scp_from_test_server_to_active_controller(backup_dest_path, '~/',
                                                     con_ssh=con_ssh,
                                                     force_ipv4=True)

    # Only pass the wipe_ceph_osds extra-var when the option is available
    wipe_ceph_osds = ''
    if HAS_WIPE_CEPH_OSDS and WIPE_CEPH_OSDS:
        wipe_ceph_osds = 'wipe_ceph_osds=true'
    if HAS_WIPE_CEPH_OSDS and not WIPE_CEPH_OSDS:
        wipe_ceph_osds = 'wipe_ceph_osds=false'

    # Test step 2
    # NOTE(review): the password expressions after ansible_become_pass= and
    # admin_password= are redacted ('******') in this copy, which leaves
    # this statement syntactically broken; restore the original expressions.
    cmd = "ansible-playbook {} -e ".format(RESTORE_PLATFORM_PLAYBOOK) \
        + "\"initial_backup_dir=/home/sysadmin " \
        + wipe_ceph_osds + " " \
        + "ansible_become_pass="******" " \
        + "admin_password="******" " \
        + "backup_filename=" + os.path.basename(STORE_BACKUP_PATH) + "\""

    LOG.tc_step("Run " + cmd)
    rc, output = con_ssh.exec_cmd(cmd, expect_timeout=RESTORE_WAIT_TIMEOUT)

    # Here prompt will change when collecting logs on controller-0
    con_ssh.set_prompt(r'.*\$')
    collect_logs(con_ssh, c0_ip, 'after restore')

    assert rc == 0 and analyze_ansible_output(output)[0] == 0, \
        "{} execution failed: {} {}".format(cmd, rc, output)
def setup_tis_ssh(lab):
    """Return an SSH session to the lab's floating IP, creating and
    registering a new one when no active controller client exists yet."""
    client = ControllerClient.get_active_controller(fail_ok=True)
    if client is not None:
        return client

    client = SSHClient(lab['floating ip'], HostLinuxUser.get_user(),
                       HostLinuxUser.get_password(), CONTROLLER_PROMPT)
    client.connect(retry=True, retry_timeout=30)
    ControllerClient.set_active_controller(client)
    return client
def test_enable_tpm(swact_first):
    """Enable TPM (install the HTTPS certificate into TPM) and verify the
    resulting configuration changes.

    Args:
        swact_first: when truthy, swact the active controller before
            enabling TPM.
    """
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(con_ssh)

    if code == 0:
        # TPM already enabled: disable it first so the enable path is
        # actually exercised by this test
        LOG.info('TPM already configured on the lab, cert_id:{}, cert_type:{}'.
                 format(cert_id, cert_type))

        LOG.tc_step('disable TPM first in order to test enabling TPM')
        code, output = remove_cert_from_tpm(con_ssh,
                                            fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'
        # allow the config change to settle before watching the alarm
        time.sleep(30)

        LOG.info('Waiting alarm: out-of-config cleaned up')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)
    else:
        LOG.info('TPM is NOT configured on the lab')
        LOG.info('-code:{}, cert_id:{}, cert_type:{}'.format(
            code, cert_id, cert_type))

    if swact_first:
        LOG.tc_step('Swact the active controller as instructed')
        if len(system_helper.get_controllers()) < 2:
            LOG.info('Less than 2 controllers, skip swact')
        else:
            host_helper.swact_host(fail_ok=False)
            # NOTE(review): local_conf_backup_dir / conf_backup_dir are not
            # defined in this function -- presumably module-level globals;
            # confirm they are set before this test runs.
            copy_config_from_local(
                con_ssh, local_conf_backup_dir,
                os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

    LOG.tc_step('Install HTTPS Certificate into TPM')
    code, output = store_cert_into_tpm(
        con_ssh,
        check_first=False,
        fail_ok=False,
        pem_password=HostLinuxUser.get_password())
    assert 0 == code, 'Failed to instll certificate into TPM, cert-file'

    LOG.info('OK, certificate is installed into TPM')

    LOG.info('Wait the out-of-config alarm cleared')
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    LOG.tc_step(
        'Verify the configurations changes for impacted components, expecting all changes exit'
    )
    verify_configuration_changes(expected=True, connection=con_ssh)
def ssh_to_stx(lab=None, set_client=False):
    """Connect to the lab's floating IP over SSH.

    Args:
        lab (dict|None): lab dict; defaults to ProjVar 'LAB'
        set_client (bool): when True, register the session as the active
            controller client

    Returns (SSHClient): the connected session
    """
    lab = lab or ProjVar.get_var('LAB')

    session = SSHClient(lab['floating ip'],
                        user=HostLinuxUser.get_user(),
                        password=HostLinuxUser.get_password(),
                        initial_prompt=Prompt.CONTROLLER_PROMPT)
    session.connect(retry=True, retry_timeout=30, use_current=False)

    if set_client:
        ControllerClient.set_active_controller(session)
    return session
def test_configure_external_ceph(ceph_lab, ceph_services):
    """
    Configure external ceph on a system.

    Currently only supported on wcp3-6, using wcp7-12 as the external ceph
    system: wcp7-12 is installed in Region mode, wcp3-6 with a custom config
    that includes an infrastructure interface. The ceph.conf from wcp7-12 is
    copied over (renamed) and used to enable external ceph. Only cinder,
    nova and glance can be enabled on external ceph; swift is not supported.
    Once enabled, a new cinder type is added and resource creation defaults
    to the external ceph backend (depending on provisioned services).

    Test Steps:
    1.  Copy ceph.conf from wcp7-12
    2.  Provision external ceph services on wcp3-6

    TODO:
    - Add an infra ping test from regular lab to ceph lab - skip if fails
    """
    LOG.tc_step("Retrieve ceph.conf from the external ceph system")
    con_ssh = ControllerClient.get_active_controller()

    ceph_lab = get_lab_dict(ceph_lab)
    source_server = ceph_lab['floating ip']
    source_lab_name = ceph_lab['short_name']
    source_filepath = "/etc/ceph/ceph.conf"
    # NOTE(review): home dir is hard-coded here; elsewhere the suite derives
    # it from HostLinuxUser.get_home() -- confirm sysadmin home is intended.
    dest_filepath = "/home/sysadmin/ceph_{}.conf".format(source_lab_name)

    con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                        source_ip=source_server,
                        source_pswd=HostLinuxUser.get_password(),
                        source_path=source_filepath,
                        dest_path=dest_filepath,
                        timeout=60)

    LOG.tc_step(
        "Confirm ceph.conf file was successfully copied before proceeding")
    if not con_ssh.file_exists(dest_filepath):
        skip("External ceph.conf not present on the system")

    LOG.tc_step("Provision storage-backend for external ceph")
    add_external_ceph(dest_filepath, ceph_services.split(sep='_'))
def __init__(self, host, prompt=None, port=0, timeout=30, hostname=None,
             user=None, password=None, negotiate=False, vt100query=False,
             console_log_file=None):
    """Establish a telnet connection and discover the remote hostname/prompt.

    Args:
        host (str): host to telnet to
        prompt (str): expected shell prompt regex; defaults to r':~\$ '
        port (int): telnet port (0 = default)
        timeout (int): default timeout for operations
        hostname (str): known hostname; auto-detected from the login banner
            when None
        user/password: credentials; default to HostLinuxUser's at call time
        negotiate (bool): enable telnet option negotiation
        vt100query (bool): handle VT100 query sequences
        console_log_file (str): path for the console log
    """
    # BUG FIX: the original used `user=HostLinuxUser.get_user()` (and the
    # password equivalent) as default arguments, which are evaluated once
    # at class-definition (import) time and freeze whatever the credentials
    # were then. Deferring the lookup to call time via a None sentinel is
    # backward-compatible for all callers.
    if user is None:
        user = HostLinuxUser.get_user()
    if password is None:
        password = HostLinuxUser.get_password()

    self.logger = LOG
    super(TelnetClient, self).__init__(host=host, port=port, timeout=timeout)

    if not hostname:
        # Nudge the console and try to read the hostname from the
        # login/logged-in banner
        self.send('\r\n\r\n')
        prompts = [LOGIN_REGEX, LOGGED_IN_REGEX]
        index, re_obj, matched_text = super().expect(prompts, timeout=10)
        if index in (0, 1):
            hostname = prompts[index].search(matched_text).group(1).decode(
                errors='ignore')

    if not prompt:
        prompt = r':~\$ '

    #-- mod begins
    self.console_log_file = self.get_log_file(console_log_file)
    self.negotiate = negotiate
    self.vt100query = vt100query
    if self.vt100query:
        self.vt100querybuffer = b''  # Buffer for VT100 queries
    #-- mod ends

    self.flush(timeout=1)
    self.logger = telnet_logger(hostname) if hostname else telnet_logger(
        host + ":" + str(port))
    self.hostname = hostname
    self.prompt = prompt
    self.cmd_output = ''
    self.cmd_sent = ''
    self.timeout = timeout
    self.user = user
    self.password = password

    self.logger.info(
        'Telnet connection to {}:{} ({}) is established'.format(
            host, port, hostname))
def download_patches(lab, server, patch_dir, conn_ssh=None):
    """rsync all *.patch files from patch_dir on the given build server to
    the active controller's ~/patches/ directory.

    Args:
        lab (dict): lab dict (used to resolve the active controller IP)
        server: server object with an ssh_conn to the patch source
        patch_dir (str): directory on the server containing *.patch files
        conn_ssh (SSHClient): controller connection; active controller is
            used when None

    Returns (dict): mapping of patch name (no extension) -> path on the
        controller
    """
    patches = {}
    rc, output = server.ssh_conn.exec_cmd(
        "ls -1 --color=none {}/*.patch".format(patch_dir))
    assert rc == 0, "Failed to list patch files in directory path {}.".format(
        patch_dir)

    if output is not None:
        patch_dest_dir = HostLinuxUser.get_home() + "patches/"
        active_controller = system_helper.get_active_controller_name()
        dest_server = lab[active_controller + ' ip']
        ssh_port = None
        # sshpass supplies the controller password non-interactively
        pre_opts = 'sshpass -p "{0}"'.format(HostLinuxUser.get_password())
        server.ssh_conn.rsync(patch_dir + "/*.patch",
                              dest_server,
                              patch_dest_dir,
                              ssh_port=ssh_port,
                              pre_opts=pre_opts)

        if conn_ssh is None:
            conn_ssh = ControllerClient.get_active_controller()

        # Confirm the patches actually landed on the controller
        rc, output = conn_ssh.exec_cmd(
            "ls -1 {}/*.patch".format(patch_dest_dir))
        assert rc == 0, "Failed to list downloaded patch files in directory path {}.".format(
            patch_dest_dir)

        if output is not None:
            for item in output.splitlines():
                # key: file name without extension; value: full path
                patches[os.path.splitext(os.path.basename(item))[0]] = item

            patch_ids = " ".join(patches.keys())
            LOG.info("List of patches:\n {}".format(patch_ids))

    return patches
def scp_from_active_controller_to_localhost(
        source_path, dest_path='', src_user=None, src_password=None,
        timeout=900, is_dir=False):
    """Copy file(s) from the active controller to localhost (where the
    automated tests run), defaulting credentials to HostLinuxUser's.

    Returns whatever scp_to_local returns.
    """
    src_user = src_user or HostLinuxUser.get_user()
    src_password = src_password or HostLinuxUser.get_password()
    active_cont_ip = ControllerClient.get_active_controller().host

    # NOTE(review): keyword names here (source_ip=, source_user=) should be
    # confirmed against the scp_to_local definition in use -- one variant of
    # scp_to_local takes source_server= instead of source_ip=.
    return scp_to_local(source_path=source_path,
                        source_ip=active_cont_ip,
                        source_user=src_user,
                        source_password=src_password,
                        dest_path=dest_path,
                        timeout=timeout,
                        is_dir=is_dir)
def scp_from_localhost_to_active_controller(
        source_path, dest_path=None, dest_user=None, dest_password=None,
        timeout=900, is_dir=False):
    """Copy file(s) from localhost to the active controller, defaulting the
    destination path and credentials to HostLinuxUser's.

    Returns whatever scp_from_local returns.
    """
    target_ip = ControllerClient.get_active_controller().host
    dest_path = dest_path or HostLinuxUser.get_home()
    dest_user = dest_user or HostLinuxUser.get_user()
    dest_password = dest_password or HostLinuxUser.get_password()

    return scp_from_local(source_path,
                          target_ip,
                          dest_path=dest_path,
                          dest_user=dest_user,
                          dest_password=dest_password,
                          timeout=timeout,
                          is_dir=is_dir)
def setup_vbox_tis_ssh(lab):
    """Establish SSH to a VirtualBox lab.

    VirtualBox labs are reached via an external IP/port forward when
    available; otherwise this falls back to the regular floating-IP setup.
    """
    if 'external_ip' not in lab:
        return setup_tis_ssh(lab)

    # Drop any stale active-controller session before reconnecting
    existing = ControllerClient.get_active_controller(fail_ok=True)
    if existing:
        existing.disconnect()

    client = SSHClient(lab['external_ip'],
                       HostLinuxUser.get_user(),
                       HostLinuxUser.get_password(),
                       CONTROLLER_PROMPT,
                       port=lab['external_port'])
    client.connect(retry=True, retry_timeout=30)
    ControllerClient.set_active_controller(client)
    return client
def test_backup_platform(prepare_backup_env, controller):
    """
    Test ansible backup

    Args:
        prepare_backup_env: module fixture
        controller: test param

    Setups:
        - Create STORE_BACKUP_PATH dir on test server

    Test Steps:
        - ssh to given controller
        - ansible-playbook backup.yml
        - copy backup.tgz from active controller to test server
    """
    host = controller_precheck(controller)

    with host_helper.ssh_to_host(hostname=host) as con_ssh:
        # NOTE(review): the password expressions after ansible_become_pass=
        # and admin_password= are redacted ('******') in this copy, which
        # leaves this statement syntactically broken; restore the original
        # expressions.
        cmd = "ansible-playbook {} -e ".format(BACKUP_PLAYBOOK) \
            + "\"ansible_become_pass="******" " \
            + "admin_password="******"\""

        LOG.tc_step("Run " + cmd)

        collect_logs('before backup')
        rc, output = con_ssh.exec_cmd(cmd, expect_timeout=BACKUP_WAIT_TIMEOUT)
        collect_logs('after backup')

        assert rc == 0 and analyze_ansible_output(output)[0] == 0, \
            "{} execution failed: {} {}".format(cmd, rc, output)

        # Pick the most recently created backup archive
        cmd = "ls -tr " + StxPath.BACKUPS + " | grep backup | tail -1"
        rc, backup_archive = con_ssh.exec_cmd(cmd)
        backup_src_path = os.path.join(StxPath.BACKUPS, backup_archive)
        backup_dest_path = os.path.join(STORE_BACKUP_PATH, backup_archive)

        LOG.tc_step("Copy from controller {} to test server {}".format(
            backup_src_path, backup_dest_path))
        common.scp_from_active_controller_to_test_server(
            os.path.join(StxPath.BACKUPS, backup_archive), backup_dest_path)
def delete_object_file(object_path, rm_dir=False, client=None):
    """Delete a file (or directory when rm_dir=True) on the CLI client and,
    unless REMOTE_CLI is set, also on the standby controller.

    Returns (bool): True
    """
    def _purge(target):
        # Only attempt removal when the path actually exists on the target
        rc, listing = target.exec_cmd("ls {}".format(object_path))
        if rc == 0:
            flag = '-r' if rm_dir else ''
            target.exec_cmd('rm {} {}'.format(flag, object_path))
            LOG.info("Files deleted {}: {}".format(object_path, listing))

    if not client:
        client = get_cli_client()
    _purge(client)

    if not ProjVar.get_var('REMOTE_CLI'):
        standby = system_helper.get_standby_controller_name()
        with host_helper.ssh_to_host(
                standby,
                username=HostLinuxUser.get_user(),
                password=HostLinuxUser.get_password()) as standby_ssh:
            _purge(standby_ssh)

    return True
def scp_to_local(dest_path, source_path, source_server=None, source_user=None,
                 source_password=None, timeout=900, is_dir=False, ipv6=None):
    """Copy file(s) from a remote host to localhost (where the automated
    tests run).

    Args:
        dest_path (str): destination path on localhost
        source_path (str): source file/directory path
        source_server (str): ip of the source host
        source_user (str): username on the source host; defaults to
            HostLinuxUser
        source_password (str): password on the source host; defaults to
            HostLinuxUser
        timeout (int): max seconds to wait for scp to finish
        is_dir (bool): copy a directory (recursive) instead of a single file
        ipv6: force scp -6 even when the address version is not detected
            as IPv6
    """
    source_user = source_user or HostLinuxUser.get_user()
    source_password = source_password or HostLinuxUser.get_password()

    recurse = '-r ' if is_dir else ''

    # IPv6 addresses must be bracketed in the scp target and need '-6'
    ipv6_arg = ''
    if get_ip_version(source_server) == 6:
        ipv6_arg = '-6 '
        source_server = '[{}]'.format(source_server)
    elif ipv6:
        ipv6_arg = '-6 '

    cmd = ('scp {}-oStrictHostKeyChecking=no '
           '-o UserKnownHostsFile=/dev/null '
           '{}{}@{}:{} {}'.format(ipv6_arg, recurse, source_user,
                                  source_server, source_path, dest_path))
    _scp_on_local(cmd, remote_password=source_password, timeout=timeout)
class LinuxUser:
    """Book-keeping for Linux user accounts on the system under test.

    Most operations are placeholders (they raise NotImplementedError); the
    class currently serves as a registry mapping known usernames to
    passwords, seeded with the HostLinuxUser credentials.
    """
    # username -> password registry, shared across instances
    users = {HostLinuxUser.get_user(): HostLinuxUser.get_password()}
    con_ssh = None

    def __init__(self, user, password, con_ssh=None):
        self.user = user
        self.password = password
        self.added = False
        if con_ssh is None:
            con_ssh = ControllerClient.get_active_controller()
        self.con_ssh = con_ssh

    def add_user(self):
        # Registry is updated, but the actual system-level add is not
        # implemented yet.
        self.added = True
        LinuxUser.users[self.user] = self.password
        raise NotImplementedError

    def modify_password(self):
        raise NotImplementedError

    def delete_user(self):
        raise NotImplementedError

    def login(self):
        raise NotImplementedError

    @classmethod
    def get_user_password(cls):
        raise NotImplementedError

    @classmethod
    def get_current_user_password(cls, con_ssh=None):
        """Return (username, password) for the currently logged-in user on
        the class-level connection."""
        if con_ssh:
            cls.con_ssh = con_ssh
        elif not cls.con_ssh:
            cls.con_ssh = ControllerClient.get_active_controller()
        current = cls.con_ssh.get_current_user()
        return current, cls.users[current]
def test_ima_event_generation(operation, file_path):
    """
    Following IMA violation scenarios are covered:
        - append/edit data to/of a monitored file, result in changing of
          the hash
        - dynamic library changes
        - create and execute a files as sysadmin

    Test Steps:
        - Perform specified file operations
        - Check IMA violation event is logged
    """
    global files_to_delete
    con_ssh = ControllerClient.get_active_controller()

    start_time = common.get_date_in_format()

    source_file = file_path
    backup_file = None

    if operation in ('edit_and_execute', 'append_and_execute'):
        dest_file = "/usr/sbin/TEMP"
        copy_file(source_file, dest_file, cleanup='dest')

        if operation == 'edit_and_execute':
            LOG.tc_step("Open copy of monitored file and save")
            cmd = "vim {} '+:wq!'".format(dest_file)
            con_ssh.exec_sudo_cmd(cmd, fail_ok=False)
            execute_cmd = "{} -p".format(dest_file)
        else:
            LOG.tc_step("Append to copy of monitored file")
            # BUG FIX: the original format string had no '{}' placeholder
            # ('echo "output" | ...'), so .format(...) was a no-op and the
            # sudo password was never piped to `sudo -S`.
            cmd = 'echo "{}" | sudo -S tee -a /usr/sbin/TEMP'.format(
                HostLinuxUser.get_password())
            con_ssh.exec_cmd(cmd, fail_ok=False)

            LOG.tc_step("Execute modified file")
            con_ssh.exec_sudo_cmd(dest_file)
            execute_cmd = "{}".format(dest_file)

        LOG.tc_step("Execute modified file")
        con_ssh.exec_sudo_cmd(execute_cmd)

    elif operation == 'replace_library':
        backup_file = "/root/{}".format(source_file.split('/')[-1])
        dest_file_nocsum = "/root/TEMP"

        LOG.info("Backup source file {} to {}".format(source_file,
                                                      backup_file))
        copy_file(source_file, backup_file)
        LOG.info("Copy the library without the checksum")
        copy_file(source_file, dest_file_nocsum, preserve=False)
        LOG.info("Replace the library with the unsigned one")
        move_file(dest_file_nocsum, source_file)

    elif operation == 'create_and_execute':
        dest_file = "{}/TEMP".format(HostLinuxUser.get_home())
        create_and_execute(file_path=dest_file, sudo=True)

    LOG.tc_step("Check for IMA event")
    ima_events = system_helper.wait_for_events(start=start_time,
                                               timeout=60, num=10,
                                               event_log_id=EventLogID.IMA,
                                               state='log',
                                               severity='major',
                                               fail_ok=True, strict=False)

    if backup_file:
        # Restore the original library before asserting, so a failed assert
        # does not leave the system with an unsigned library in place
        LOG.info("Restore backup file {} to {}".format(backup_file,
                                                       source_file))
        move_file(backup_file, source_file)

    assert ima_events, "IMA event is not generated after {} on " \
                       "{}".format(operation, file_path)
def fetch_cert_file(ssh_client, search_path=None):
    """Locate a PEM certificate on the build server (preferring one whose
    directory matches the current lab) and scp it onto the active
    controller.

    Args:
        ssh_client: SSH session to the active controller
        search_path (str): directory on the build server to search;
            defaults to the default host-build lab-config path

    Returns (tuple): (0, <path of the copied .bk file>) on success,
        (<non-zero>, <error message>) on failure
    """
    save_cert_to = os.path.dirname(SecurityPath.ALT_CERT_PATH)

    code, output = ssh_client.exec_cmd('mkdir -p {}'.format(save_cert_to),
                                       fail_ok=True)
    if code != 0:
        # BUG FIX: the original format string had a single '{}' but two
        # arguments, so the actual error text was silently dropped.
        msg = 'failed to create path for certificate files:{}, error:{}'.format(
            save_cert_to, output)
        LOG.warn(msg)
        return code, msg

    from_server = build_server.DEFAULT_BUILD_SERVER['ip']
    prompt = r'\[{}@.* \~\]\$'.format(TestFileServer.get_user())
    ssh_to_server = SSHFromSSH(ssh_client, from_server,
                               TestFileServer.get_user(),
                               TestFileServer.get_password(),
                               initial_prompt=prompt)
    ssh_to_server.connect(retry=5)

    if search_path is None:
        search_path = os.path.join(BuildServerPath.DEFAULT_HOST_BUILD_PATH,
                                   BuildServerPath.LAB_CONF_DIR_PREV)

    search_cmd = "\\find {} -maxdepth 5 -type f -name '*.pem'".format(
        search_path)
    code, output = ssh_to_server.exec_cmd(search_cmd, fail_ok=True)

    lab_name = ProjVar.get_var('lab')['name']
    LOG.info('Get the PEM for current lab ({}) first'.format(lab_name))
    if code == 0 and output:
        # Prefer a PEM whose parent directory matches the current lab;
        # otherwise fall back to the first hit.
        for file in output.splitlines():
            exiting_lab_name = os.path.basename(os.path.dirname(file))
            if exiting_lab_name in lab_name or lab_name in exiting_lab_name:
                certificate_file = file
                break
        else:
            certificate_file = output.splitlines()[0]
    else:
        msg = 'failed to fetch cert-file from build server, tried path:{}, server:{}'.format(
            search_path, from_server)
        LOG.warn(msg)
        return -1, msg

    LOG.info(
        'found cert-file on build server, trying to scp to current active controller\ncert-file:{}'
        .format(certificate_file))

    scp_cmd = \
        'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {} ' \
        '{}@{}:{}'.format(
            certificate_file, HostLinuxUser.get_user(),
            lab_info.get_lab_floating_ip(), save_cert_to)

    ssh_to_server.send(scp_cmd)
    timeout = 60
    # The expect list has two entries, so valid indices are 0 and 1. The
    # original also checked for index 2 (a host-key "continue connecting"
    # confirmation), which was unreachable dead code and has been removed;
    # StrictHostKeyChecking=no suppresses that prompt anyway.
    output_index = ssh_to_server.expect(
        [ssh_to_server.prompt, Prompt.PASSWORD_PROMPT], timeout=timeout)
    if output_index == 1:
        ssh_to_server.send(HostLinuxUser.get_password())
        output_index = ssh_to_server.expect(timeout=timeout)

    assert output_index == 0, "Failed to scp files"

    exit_code = ssh_to_server.get_exit_code()
    assert 0 == exit_code, "scp not fully succeeded"

    ssh_to_server.close()

    # Move the copy aside as a .bk file and return that path
    copied_cert_file = os.path.join(save_cert_to,
                                    os.path.basename(certificate_file))
    ssh_client.exec_cmd('ls -l {}; mv {} {}.bk'.format(copied_cert_file,
                                                       copied_cert_file,
                                                       copied_cert_file))

    return 0, copied_cert_file + '.bk'
class LdapUserManager(object, metaclass=Singleton): """ The LDAP User Manager """ LINUX_ROOT_PASSWORD = HostLinuxUser.get_password() KEYSTONE_USER_NAME = Tenant.get('admin')['user'] KEYSTONE_USER_DOMAIN_NAME = 'Default' KEYSTONE_PASSWORD = Tenant.get('admin')['password'] PROJECT_NAME = 'admin' PROJECT_DOMAIN_NAME = 'Default' def __init__(self, ssh_con=None): if ssh_con is not None: self.ssh_con = ssh_con else: self.ssh_con = ControllerClient.get_active_controller() self.users_info = {} def ssh_to_host(self, host=None): """ Get the ssh connection to the active controller or the specified host (if it's the case) Args: host (str): the host to ssh to, using the active controller if it's unset or None Returns (object): the ssh connection session to the active controller """ if host is None: return self.ssh_con else: return SSHClient(host=host) def get_ldap_admin_password(self): """ Get the LDAP Administrator's password Args: Returns (str): The password of the LDAP Administrator """ cmd = 'grep "credentials" /etc/openldap/slapd.conf.backup' self.ssh_con.flush() code, output = self.ssh_con.exec_sudo_cmd(cmd) if 0 == code and output.strip(): for line in output.strip().splitlines(): if 'credentials' in line and '=' in line: password = line.split('=')[1] return password return '' def get_ldap_user_password(self, user_name): """ Get the password of the LDAP User Args: user_name (str): the user name Returns (str): the password of the user """ if user_name in self.users_info and \ self.users_info[user_name]['passwords']: return self.users_info[user_name]['passwords'][-1] return None def login_as_ldap_user_first_time(self, user_name, new_password=None, host=None): """ Login with the specified LDAP User for the first time, during which change the initial password as a required step. 
Args: user_name (str): user name of the LDAP user new_password (str): password of the LDAP user host (str): host name to which the user will login Returns (tuple): results (bool): True if success, otherwise False password (str): new password of the LDAP user """ hostname_ip = 'controller-1' if host is None else host if new_password is not None: password = new_password else: password = '******'.format(''.join( random.sample(user_name, len(user_name)))) cmd_expected = [ ('ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format( user_name, hostname_ip), (r'Are you sure you want to continue connecting (yes/no)?', ), ('Failed to get "continue connecting" prompt', )), ( 'yes', # ("{}@{}'s password:"******".*@{}'s password: "******""" Find the LDAP User with the specified name Args: user_name (str): - user name of the LDAP User to search for Returns: existing_flag (boolean) - True, the LDAP User with the specified name existing - False, cannot find a LDAP User with the specified name user_info (dict): - user information """ cmd = 'ldapfinger -u {}'.format(user_name) self.ssh_con.flush() code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True, strict_passwd_prompt=True) found = False user_info = {} if output.strip(): for line in output.strip().splitlines(): if line.startswith('dn: '): user_info['dn'] = line.split()[1].strip() elif line.startswith('cn: '): user_info['cn'] = line.split()[1].strip() elif line.startswith('uid: '): user_info['uid'] = line.split()[1].strip() elif line.startswith('uidNumber: '): user_info['uid_number'] = int(line.split()[1].strip()) elif line.startswith('gidNumber: '): user_info['gid_number'] = int(line.split()[1].strip()) elif line.startswith('homeDirectory: '): user_info['home_directory'] = line.split()[1].strip() elif line.startswith('userPassword:: '): user_info['user_password'] = line.split()[1].strip() elif line.startswith('loginShell: '): user_info['login_shell'] = line.split()[1].strip() elif line.startswith('shadowMax: '): 
                    # Tail of find_ldap_user -- its 'def' line is above this
                    # chunk. Parses shadow attributes from the ldap query
                    # output into user_info.
                    user_info['shadow_max'] = int(line.split()[1].strip())
                elif line.startswith('shadowWarning: '):
                    user_info['shadow_warning'] = int(line.split()[1].strip())
                else:
                    pass
        else:
            # No error branch taken: the user record was located.
            found = True

        return found, user_info

    def rm_ldap_user(self, user_name):
        """
        Delete the LDAP User with the specified name

        Args:
            user_name (str): name of the LDAP user to delete

        Returns (tuple):
            code - 0 successfully deleted the specified LDAP User
                otherwise: failed
            output - message from the deleting CLI
        """
        cmd = 'ldapdeleteuser {}'.format(user_name)
        # Flush stale buffered output so the sudo command sees a clean prompt.
        self.ssh_con.flush()
        code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True)
        # Keep the in-memory cache consistent with the system state.
        if 0 == code and user_name in self.users_info:
            del self.users_info[user_name]
        return code, output

    @staticmethod
    def validate_user_settings(secondary_group=False,
                               secondary_group_name=None,
                               password_expiry_days=90,
                               password_expiry_warn_days=2):
        """
        Validate the settings to be used as attributes of a LDAP User

        Args:
            secondary_group (bool):
                True - Secondary group to add user to
                False - No secondary group
            secondary_group_name (str): Name of secondary group (will be
                ignored if secondary_group is False)
            password_expiry_days (int): days until the password must change
            password_expiry_warn_days (int): days of warning before expiry

        Returns (tuple): (code, message)
            0 - settings valid
            1 - expiry inputs are not convertible to int
            4 - non-positive password expiry days
            5 - non-positive password expiry warn days
        """
        try:
            opt_expiry_days = int(password_expiry_days)
            opt_expiry_warn_days = int(password_expiry_warn_days)
            bool(secondary_group)
            str(secondary_group_name)
        except ValueError:
            return 1, 'invalid input: {}, {}'.format(
                password_expiry_days, password_expiry_warn_days)

        if opt_expiry_days <= 0:
            return 4, 'invalid password expiry days:{}'.format(opt_expiry_days)

        if opt_expiry_warn_days <= 0:
            return 5, 'invalid password expiry days:{}'.format(
                opt_expiry_warn_days)

        return 0, ''

    def create_ldap_user(self, user_name, sudoer=False, secondary_group=False,
                         secondary_group_name=None, password_expiry_days=90,
                         password_expiry_warn_days=2, delete_if_existing=True,
                         check_if_existing=True):
        """
        Create an LDAP user by driving the interactive 'ldapusersetup' tool.

        Args:
            user_name (str): user name of the LDAP User
            sudoer (bool):
                True - Add the user to sudoer list
                False - Do not add the user to sudoer list
            secondary_group (bool):
                True - Secondary group to add user to
                False - No secondary group
            secondary_group_name (str): Name of secondary group (will be
                ignored if secondary_group is False)
            password_expiry_days (int):
            password_expiry_warn_days (int):
            delete_if_existing (bool):
                True - Delete the user if it is already existing
                False - Return the existing LDAP User
            check_if_existing (bool):
                True - Check if the LDAP User existing with the specified
                    name
                False - Do not check if any LDAP Users with the specified
                    name existing

        Returns tuple(code, user_info):
            code (int):
                -1 -- a LDAP User already existing with the same name (don't
                    care other attributes for now)
                0 -- successfully created a LDAP User with specified name
                    and attributes
                1 -- a LDAP User already existing but fail_on_existing
                    specified
                2 -- CLI to create a user succeeded but cannot find the
                    user after
                3 -- failed to create a LDAP User (the CLI failed)
                4 -- failed to change the initial password and login the
                    first time
                5 -- invalid inputs
        """
        # Normalize None inputs to the documented defaults.
        password_expiry_days = 90 if password_expiry_days is None else \
            password_expiry_days
        password_expiry_warn_days = 2 if password_expiry_warn_days is None \
            else password_expiry_warn_days
        secondary_group = False if secondary_group is None else secondary_group
        secondary_group_name = '' if secondary_group_name is None else \
            secondary_group_name

        code, message = self.validate_user_settings(
            secondary_group=secondary_group,
            secondary_group_name=secondary_group_name,
            password_expiry_days=password_expiry_days,
            password_expiry_warn_days=password_expiry_warn_days)
        if 0 != code:
            return 5, {}

        if check_if_existing:
            existing, user_info = self.find_ldap_user(user_name)
            if existing:
                if delete_if_existing:
                    code, message = self.rm_ldap_user(user_name)
                    if 0 != code:
                        return 1, user_info
                else:
                    return -1, user_info

        # Each entry drives one step of the interactive dialogue:
        # (command-to-send, expected-prompt patterns, error patterns).
        cmds_expectings = [
            ('sudo ldapusersetup',
             (r'Enter username to add to LDAP:', ),
             ()),
            (
                '{}'.format(user_name),
                (r'Add {} to sudoer list? (yes/NO): '.format(user_name), ),
                ('Critical setup error: cannot add user.*', ),
            ),
            ('yes' if sudoer else 'NO',
             (r'Add .* to secondary user group\? \(yes/NO\):', ),
             ()),
        ]

        if secondary_group:
            cmds_expectings += [
                ('yes',
                 (r'Secondary group to add user to? [wrs_protected]: ', ),
                 ()),
                ('{}'.format(secondary_group_name),
                 (r'Enter days after which user password must be changed '
                  r'\[{}\]:'.format(password_expiry_days), ),
                 ())
            ]
        else:
            cmds_expectings += [
                (
                    'NO',
                    (r'Enter days after which user password must be changed '
                     r'\[{}\]:'.format(password_expiry_days), ),
                    (),
                ),
            ]

        cmds_expectings += [
            (
                '{}'.format(password_expiry_days),
                (r'Enter days before password is to expire that user is '
                 r'warned \[{}\]:'.format(password_expiry_warn_days), ),
                (),
            ),
            (
                '{}'.format(password_expiry_warn_days),
                (
                    'Successfully modified user entry uid=m-user01,ou=People,'
                    'dc=cgcs,dc=local in LDAP',
                    'Updating password expiry to {} days'.format(
                        password_expiry_warn_days),
                ),
                (),
            )
        ]

        created = True
        self.ssh_con.flush()
        for cmd, outputs, errors in cmds_expectings:
            self.ssh_con.send(cmd)
            expected_outputs = list(outputs + errors)

            index = self.ssh_con.expect(blob_list=expected_outputs,
                                        fail_ok=True)
            # Matching anything beyond the expected prompts means an error
            # pattern (or timeout index) hit: abort the dialogue.
            if len(outputs) <= index:
                created = False
                break
            expected_outputs[:] = []
            time.sleep(3)

        user_info = {}
        if created:
            existing, user_info = self.find_ldap_user(user_name)
            if existing:
                success, password = self.login_as_ldap_user_first_time(
                    user_name)
                if not success:
                    code = 4
                else:
                    user_info['passwords'] = [password]
                    self.users_info[user_name] = user_info
                    code = 0
            else:
                code = 2
        else:
            code = 3

        return code, user_info

    def login_as_ldap_user(self, user_name, password, host=None,
                           pre_store=False, disconnect_after=False):
        """
        Login as the specified user name and password onto the specified host

        Args:
            user_name (str): user name
            password (str): password
            host (str): host to login to
            pre_store (bool):
                True - pre-store keystone user credentials for session
                False - chose 'N' (by default) meaning do not pre-store
                    keystone user credentials
            disconnect_after (bool):
                True - disconnect the logged in session
                False - keep the logged in session

        Returns (tuple):
            logged_in (bool) - True if successfully logged into the
                specified host using the specified user/password
            password (str) - the password used to login
            ssh_con (object) - the ssh session logged in
        """
        if not host:
            host = 'controller-1'
            if system_helper.is_aio_simplex():
                host = 'controller-0'

        prompt_keystone_user_name = r'Enter Keystone username \[{}\]: '.format(
            user_name)

        # NOTE(review): the '******' runs below appear to be redaction
        # artifacts in this copy of the source -- the original literals and
        # tuple separators around the password steps seem to have been
        # masked out. Verify against the original file before editing.
        cmd_expected = (
            (
                'ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format(
                    user_name, host),
                (r'Are you sure you want to continue connecting \(yes/no\)\?', ),
                ('ssh: Could not resolve hostname {}: Name or service not '
                 'known'.format(host), ),
            ),
            (
                'yes',
                (r'{}@{}\'s password: '******'{}'.format(password),
                 (
                     prompt_keystone_user_name,
                     Prompt.CONTROLLER_PROMPT,
                 ),
                 (r'Permission denied, please try again\.', ),
                 ),
            ),
        )

        logged_in = False
        self.ssh_con.flush()
        for i in range(len(cmd_expected)):
            cmd, expected, errors = cmd_expected[i]
            LOG.info('cmd={}\nexpected={}\nerrors={}\n'.format(
                cmd, expected, errors))
            self.ssh_con.send(cmd)

            index = self.ssh_con.expect(blob_list=list(expected + errors))
            # An index past the expected prompts means an error matched.
            if len(expected) <= index:
                break
            elif 3 == i:
                if expected[index] == prompt_keystone_user_name:
                    assert pre_store, \
                        'pre_store is False, while selecting "y" to ' \
                        '"Pre-store Keystone user credentials ' \
                        'for this session!"'
                else:
                    logged_in = True
                    break
        else:
            # for-else: all steps completed without break -> logged in.
            logged_in = True

        if logged_in:
            if disconnect_after:
                self.ssh_con.send('exit')

        return logged_in, password, self.ssh_con

    def change_ldap_user_password(self, user_name, password, new_password,
                                  change_own_password=True,
                                  check_if_existing=True, host=None,
                                  disconnect_after=False):
        """
        Modify the password of the specified user to the new one

        Args:
            user_name (str): - name of the LDAP User
            password (str): - password of the LDAP User
            new_password (str): - new password to change to
            change_own_password (bool):
            check_if_existing (bool):
                - True: check if the user already existing first
                  False: change the password without checking the existence
                  of the user
            host (str): - The host to log into
            disconnect_after (bool)
                - True: disconnect the ssh connection after changing the
                  password
                - False: keep the ssh connection

        Returns (bool): True if successful, False otherwise
        """
        if check_if_existing:
            found, user_info = self.find_ldap_user(user_name)
            if not found:
                return False

        # Only changing one's own password is supported here.
        if not change_own_password:
            return False

        logged_in, password, ssh_con = \
            self.login_as_ldap_user(user_name, password=password, host=host,
                                    disconnect_after=False)

        if not logged_in or not password or not ssh_con:
            return False, ssh_con

        # NOTE(review): as in login_as_ldap_user, the '******' runs below
        # look like redaction artifacts that masked the original prompt
        # literals/tuple separators. Verify against the original file.
        cmds_expected = (
            (
                'passwd',
                (r'\(current\) LDAP Password: '******'New password: '******'passwd: Authentication token manipulation error',
                 EOF,
                 ),
            ),
            (
                new_password,
                ('Retype new password: '******'BAD PASSWORD: The password is too similar to the old one',
                 'BAD PASSWORD: No password supplied',
                 'passwd: Have exhausted maximum number of retries for '
                 'service',
                 EOF,
                 ),
            ),
            (
                new_password,
                ('passwd: all authentication tokens updated successfully.', ),
                (),
            ),
        )

        changed = True
        ssh_con.flush()
        for cmd, expected, errors in cmds_expected:
            ssh_con.send(cmd)
            index = ssh_con.expect(blob_list=list(expected + errors))
            # Matching an error pattern (index past expected) aborts.
            if len(expected) <= index:
                changed = False
                break

        if disconnect_after:
            ssh_con.send('exit')

        return changed, ssh_con
def setup_keypair(con_ssh, natbox_client=None):
    """
    Copy private keyfile from controller-0:/opt/platform to natbox:
    priv_keys/

    Also ensures a nova keypair exists for the primary tenant (creating it
    from the public key when missing) and keeps a private-key copy under
    /opt/platform so it survives system upgrade.

    Args:
        con_ssh (SSHClient): active controller ssh client
        natbox_client (SSHClient): NATBox client
    """
    if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh):
        LOG.info("stx-openstack is not applied. Skip nova keypair config.")
        return

    # ssh private key should now exist under keyfile_path
    if not natbox_client:
        natbox_client = NATBoxClient.get_natbox_client()

    LOG.info("scp key file from controller to NATBox")
    # keyfile path that can be specified in testcase config
    keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH'))

    # keyfile will always be copied to sysadmin home dir first and update
    # file permission
    keyfile_stx_final = os.path.normpath(
        ProjVar.get_var('STX_KEYFILE_SYS_HOME'))
    public_key_stx = '{}.pub'.format(keyfile_stx_final)

    # keyfile will also be saved to /opt/platform as well, so it won't be
    # lost during system upgrade.
    keyfile_opt_pform = '/opt/platform/{}'.format(
        os.path.basename(keyfile_stx_final))

    # copy keyfile to following NatBox location. This can be specified in
    # testcase config
    keyfile_path_natbox = os.path.normpath(
        ProjVar.get_var('NATBOX_KEYFILE_PATH'))

    auth_info = Tenant.get_primary()
    keypair_name = auth_info.get('nova_keypair',
                                 'keypair-{}'.format(auth_info['user']))
    nova_keypair = nova_helper.get_keypairs(name=keypair_name,
                                            auth_info=auth_info)

    linux_user = HostLinuxUser.get_user()
    nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user)
    if not con_ssh.file_exists(keyfile_stx_final):
        with host_helper.ssh_to_host('controller-0',
                                     con_ssh=con_ssh) as con_0_ssh:
            if not con_0_ssh.file_exists(keyfile_opt_pform):
                if con_0_ssh.file_exists(keyfile_stx_origin):
                    # Given private key file exists. Need to ensure public
                    # key exists in same dir.
                    if not con_0_ssh.file_exists('{}.pub'.format(
                            keyfile_stx_origin)) and not nova_keypair:
                        raise FileNotFoundError(
                            '{}.pub is not found'.format(keyfile_stx_origin))
                else:
                    # Need to generate ssh key
                    if nova_keypair:
                        raise FileNotFoundError(
                            "Cannot find private key for existing nova "
                            "keypair {}".format(nova_keypair))

                    con_0_ssh.exec_cmd(
                        "ssh-keygen -f '{}' -t rsa -N ''".format(
                            keyfile_stx_origin), fail_ok=False)
                    if not con_0_ssh.file_exists(keyfile_stx_origin):
                        raise FileNotFoundError(
                            "{} not found after ssh-keygen".format(
                                keyfile_stx_origin))

                # keyfile_stx_origin and matching public key should now exist
                # on controller-0
                # copy keyfiles to home dir and opt platform dir
                con_0_ssh.exec_cmd('cp {} {}'.format(keyfile_stx_origin,
                                                     keyfile_stx_final),
                                   fail_ok=False)
                con_0_ssh.exec_cmd('cp {}.pub {}'.format(
                    keyfile_stx_origin, public_key_stx), fail_ok=False)
                con_0_ssh.exec_sudo_cmd('cp {} {}'.format(
                    keyfile_stx_final, keyfile_opt_pform), fail_ok=False)

            # Make sure owner is sysadmin
            # If private key exists in opt platform, then it must also exist
            # in home dir
            con_0_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, keyfile_stx_final),
                fail_ok=False)

    # ssh private key should now exists under home dir and opt platform
    # on controller-0
    if con_ssh.get_hostname() != 'controller-0':
        # copy file from controller-0 home dir to controller-1
        con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                            source_ip='controller-0',
                            source_path=keyfile_stx_final,
                            source_pswd=HostLinuxUser.get_password(),
                            dest_path=keyfile_stx_final, timeout=60)

    if not nova_keypair:
        # Fix: log keypair_name -- 'nova_keypair' is empty/falsy inside this
        # branch, so the original message logged nothing useful.
        LOG.info("Create nova keypair {} using public key {}".format(
            keypair_name, public_key_stx))
        if not con_ssh.file_exists(public_key_stx):
            con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(),
                                source_ip='controller-0',
                                source_path=public_key_stx,
                                source_pswd=HostLinuxUser.get_password(),
                                dest_path=public_key_stx, timeout=60)
            con_ssh.exec_sudo_cmd('chown {}:{} {}'.format(
                linux_user, nonroot_group, public_key_stx), fail_ok=False)

        if ProjVar.get_var('REMOTE_CLI'):
            dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'),
                                     os.path.basename(public_key_stx))
            common.scp_from_active_controller_to_localhost(
                source_path=public_key_stx, dest_path=dest_path, timeout=60)
            public_key_stx = dest_path
            LOG.info("Public key file copied to localhost: {}".format(
                public_key_stx))

        nova_helper.create_keypair(keypair_name, public_key=public_key_stx,
                                   auth_info=auth_info)

    natbox_client.exec_cmd('mkdir -p {}'.format(
        os.path.dirname(keyfile_path_natbox)))
    tis_ip = ProjVar.get_var('LAB').get('floating ip')
    for i in range(10):
        try:
            natbox_client.scp_on_dest(source_ip=tis_ip,
                                      source_user=HostLinuxUser.get_user(),
                                      source_pswd=HostLinuxUser.get_password(),
                                      source_path=keyfile_stx_final,
                                      dest_path=keyfile_path_natbox,
                                      timeout=120)
            LOG.info("private key is copied to NatBox: {}".format(
                keyfile_path_natbox))
            break
        except exceptions.SSHException as e:
            # Transient scp failures are retried; the last attempt re-raises.
            if i == 9:
                raise
            LOG.info(e.__str__())
            time.sleep(10)
def _rsync_files_to_con1(con_ssh=None, central_region=False,
                         file_to_check=None):
    """
    Rsync the sysadmin home directory from controller-0 to controller-1,
    skipping the copy when it appears to have been done already.

    Args:
        con_ssh (SSHClient|None): ssh client of the active controller
        central_region (bool): when True, use RegionOne auth (DC central)
        file_to_check (str|None): file whose presence on controller-1
            indicates the rsync was already done; defaults to the
            tis-centos-guest image under the home dir
    """
    region = 'RegionOne' if central_region else None
    auth_info = Tenant.get('admin_platform', dc_region=region)
    if less_than_two_controllers(auth_info=auth_info, con_ssh=con_ssh):
        LOG.info("Less than two controllers on system. Skip copying file to "
                 "controller-1.")
        return

    LOG.info("rsync test files from controller-0 to controller-1 if not "
             "already done")
    stx_home = HostLinuxUser.get_home()
    if not file_to_check:
        file_to_check = '{}/images/tis-centos-guest.img'.format(stx_home)
    try:
        with host_helper.ssh_to_host("controller-1",
                                     con_ssh=con_ssh) as con_1_ssh:
            if con_1_ssh.file_exists(file_to_check):
                LOG.info(
                    "Test files already exist on controller-1. Skip rsync.")
                return
    except Exception as e:
        # Best effort: if controller-1 is unreachable, skip the rsync
        # entirely rather than failing the caller.
        LOG.error("Cannot ssh to controller-1. Skip rsync. "
                  "\nException caught: {}".format(e.__str__()))
        return

    cmd = "rsync -avr -e 'ssh -o UserKnownHostsFile=/dev/null -o " \
          "StrictHostKeyChecking=no ' " \
          "{}/* controller-1:{}".format(stx_home, stx_home)

    timeout = 1800
    with host_helper.ssh_to_host("controller-0", con_ssh=con_ssh) as \
            con_0_ssh:
        LOG.info("rsync files from controller-0 to controller-1...")
        con_0_ssh.send(cmd)

        end_time = time.time() + timeout
        while time.time() < end_time:
            # Drive the interactive rsync: accept the host key (index 2),
            # answer the password prompt (index 1), then check the exit
            # code once the shell prompt returns (index 0).
            index = con_0_ssh.expect(
                [con_0_ssh.prompt, PASSWORD_PROMPT, Prompt.ADD_HOST],
                timeout=timeout, searchwindowsize=100)
            if index == 2:
                con_0_ssh.send('yes')

            if index == 1:
                con_0_ssh.send(HostLinuxUser.get_password())

            if index == 0:
                output = int(con_0_ssh.exec_cmd('echo $?')[1])
                # rsync exit code 23 (partial transfer) is tolerated.
                if output in [0, 23]:
                    LOG.info(
                        "Test files are successfully copied to controller-1 "
                        "from controller-0")
                    break
                else:
                    raise exceptions.SSHExecCommandFailed(
                        "Failed to rsync files from controller-0 to "
                        "controller-1")
        else:
            # while-else: the deadline expired without a break.
            raise exceptions.TimeoutException(
                "Timed out rsync files to controller-1")
def record_kpi(local_kpi_file, kpi_name, host=None, log_path=None,
               end_pattern=None, start_pattern=None, start_path=None,
               extended_regex=False, python_pattern=None,
               average_for_all=False, lab_name=None, con_ssh=None,
               sudo=False, topdown=False, init_time=None, build_id=None,
               start_host=None, uptime=5, start_pattern_init=False,
               sw_version=None, patch=None, unit=None, kpi_val=None,
               fail_ok=True):
    """
    Record kpi in ini format in given file

    Args:
        local_kpi_file (str): local file path to store the kpi data
        kpi_name (str): name of the kpi
        host (str|None): which tis host the log is located at. When None,
            assume host is active controller
        start_host (str|None): specify only if host to collect start log is
            different than host for end log
        log_path (str): log_path on given host to check the kpi timestamps.
            Required if start_time or end_time is not specified
        end_pattern (str): One of the two options. Option2 only applies to
            duration type of KPI
            1. pattern that signals the end or the value of the kpi.
               Used in Linux cmd 'grep'
            2. end timestamp in following format:
               e.g., 2017-01-23 12:22:59 (for duration type of KPI)
        start_pattern (str|None): One of the two options. Only required for
            duration type of the KPI, where we need to calculate the time
            delta ourselves.
            1. pattern that signals the start of the kpi.
               Used in Linux cmd 'grep'.
            2. start timestamp in following format:
               e.g., 2017-01-23 12:10:00
        start_path (str|None): log path to search for start_pattern if
            path is different than log_path for end_pattern
        extended_regex (bool): whether to use -E in grep for extended regex.
        python_pattern (str): Only needed for KPI that is directly taken
            from log without post processing, e.g., rate for drbd sync
        average_for_all (bool): whether to get all instances from the log
            and get average
        lab_name (str): e.g., ip_1-4, hp380
        con_ssh (SSHClient|None): ssh client of active controller
        sudo (bool): whether to access log with sudo
        topdown (bool): whether to search log from top down.
            Default is bottom up.
        init_time (str|None): when set, logs prior to this timestamp will
            be ignored.
        build_id (str|None): build id to record; queried from the system
            when not given
        uptime (int|str): get load average for the previous <uptime>
            minutes via 'uptime' cmd
        start_pattern_init (bool): when set, use the timestamp of the
            start pattern as the init time for the end pattern
        sw_version (str): e.g., 17.07
        patch (str): patch name
        unit (str): unit for the kpi value if not 'Time(s)'
        kpi_val (int|float|None): when given, record this value directly
            instead of parsing it from logs
        fail_ok (bool): when True, return (1, err) on failure instead of
            raising

    Returns (tuple):
        0, <kpi value> - kpi recorded successfully
        1, <error str> - failed (only when fail_ok=True)
    """
    try:
        if not lab_name:
            lab = ProjVar.get_var('LAB')
            if not lab:
                raise ValueError("lab_name needs to be provided")
        else:
            lab = lab_info.get_lab_dict(labname=lab_name)

        kpi_dict = {'lab': lab['name']}
        if start_pattern and end_pattern and build_id:
            # No need to ssh to system if both timestamps are known
            if re.match(TIMESTAMP_PATTERN, end_pattern) and re.match(
                    TIMESTAMP_PATTERN, start_pattern):
                duration = common.get_timedelta_for_isotimes(
                    time1=start_pattern,
                    time2=end_pattern).total_seconds()
                kpi_dict.update({'value': duration,
                                 'timestamp': end_pattern,
                                 'build_id': build_id})
                append_to_kpi_file(local_kpi_file=local_kpi_file,
                                   kpi_name=kpi_name, kpi_dict=kpi_dict)
                return

        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller(fail_ok=True)
            if not con_ssh:
                # No cached session: establish a fresh one to the lab's
                # floating ip.
                if not ProjVar.get_var('LAB'):
                    ProjVar.set_var(lab=lab)
                    ProjVar.set_var(source_openrc=True)

                con_ssh = SSHClient(lab.get('floating ip'),
                                    HostLinuxUser.get_user(),
                                    HostLinuxUser.get_password(),
                                    CONTROLLER_PROMPT)
                con_ssh.connect()

        if not build_id or not sw_version:
            build_info = system_helper.get_build_info(con_ssh=con_ssh)
            build_id = build_id if build_id else build_info['BUILD_ID']
            sw_version = sw_version if sw_version else \
                build_info['SW_VERSION']
        kpi_dict.update({'build_id': build_id, 'sw_version': sw_version})

        if not patch:
            # PATCH project var is a list of patch names; join for storage.
            patch = ProjVar.get_var('PATCH')
            if patch:
                patch = ' '.join(patch)
                kpi_dict.update({'patch': patch})
        else:
            kpi_dict.update({'patch': patch})

        load_average = get_load_average(ssh_client=con_ssh, uptime=uptime)
        kpi_dict.update({'load_average': load_average})

        if not unit:
            unit = 'Time(s)'
        kpi_dict.update({'unit': unit})

        if host:
            kpi_dict['host'] = host
        if log_path:
            kpi_dict['log_path'] = log_path

        if kpi_val is not None:
            # Value supplied by caller: only a timestamp is needed.
            time_stamp = common.get_date_in_format(
                ssh_client=con_ssh, date_format=KPI_DATE_FORMAT)
        else:
            if start_pattern:
                # Duration-type KPI: delta between start and end patterns.
                kpi_val, time_stamp, count = get_duration(
                    start_pattern=start_pattern, start_path=start_path,
                    end_pattern=end_pattern, log_path=log_path, host=host,
                    sudo=sudo, topdown=topdown,
                    extended_regex=extended_regex,
                    average_for_all=average_for_all, init_time=init_time,
                    start_host=start_host,
                    start_pattern_init=start_pattern_init, con_ssh=con_ssh)
            else:
                # Value-type KPI: extract the value from the log directly.
                kpi_val, time_stamp, count = get_match(
                    pattern=end_pattern, log_path=log_path, host=host,
                    extended_regex=extended_regex,
                    python_pattern=python_pattern,
                    average_for_all=average_for_all, sudo=sudo,
                    topdown=topdown, init_time=init_time, con_ssh=con_ssh)

        kpi_dict.update({'timestamp': time_stamp, 'value': kpi_val})

        append_to_kpi_file(local_kpi_file=local_kpi_file, kpi_name=kpi_name,
                           kpi_dict=kpi_dict)
        return 0, kpi_val

    except Exception as e:
        if not fail_ok:
            raise

        print("Failed to record kpi. Error: {}".format(e.__str__()))
        import traceback
        import sys
        traceback.print_exc(file=sys.stdout)
        return 1, e.__str__()
def add_subcloud(subcloud, subcloud_controller_node, system_controller_node,
                 bootstrap_values_path, deploy_play_book_path,
                 deploy_values_path, fail_ok=False,
                 auth_info=Tenant.get('admin_platform', 'RegionOne'),
                 source_openrc=None):
    """
    Add a subcloud to the distributed cloud via 'dcmanager subcloud add'.

    Args:
        subcloud (str): name of the subcloud to add
        subcloud_controller_node: node object of the subcloud controller;
            its host_ip is used as the bootstrap address
        system_controller_node: node object of the system controller; the
            dcmanager CLI runs over its ssh connection
        bootstrap_values_path (str): path (on the system controller) to
            the bootstrap values file
        deploy_play_book_path (str): path to the deploy playbook
        deploy_values_path (str): path to the deploy values file
        fail_ok (bool): True - return (1, msg) on failure instead of
            raising
        auth_info (dict): keystone auth info for the dcmanager CLI
        source_openrc: whether to source openrc before running the CLI

    Returns (tuple): (code, output)
        0, <cli output> - subcloud added successfully
        1, <error msg>  - failed (only returned when fail_ok=True)

    Raises:
        exceptions.DCError: on any failure when fail_ok=False
    """
    operation = 'add'
    LOG.info("Attempt to {}: {}".format(operation, subcloud))

    def _fail(msg):
        # Shared failure handling: warn, then return (1, msg) or raise
        # depending on fail_ok.
        LOG.warning(msg)
        if fail_ok:
            return 1, msg
        raise exceptions.DCError(msg)

    if system_controller_node.ssh_conn is None:
        return _fail(
            'No ssh connection to System Controller; Cannot add subcloud {} '.format(
                subcloud))

    if not subcloud_controller_node or not bootstrap_values_path or not \
            deploy_play_book_path or not deploy_values_path:
        return _fail("To add a subcloud all values must be specified")

    # All config files must exist on the system controller before the CLI
    # is invoked.
    config_file_paths = [bootstrap_values_path, deploy_play_book_path,
                         deploy_values_path]
    for file_path in config_file_paths:
        if system_controller_node.ssh_conn.exec_cmd(
                "test -f {}".format(file_path))[0] != 0:
            return _fail("Subcloud {} is missing config file {} ".format(
                subcloud, file_path))

    args_dict = {
        '--bootstrap-address': subcloud_controller_node.host_ip,
        '--bootstrap-values': bootstrap_values_path,
        '--deploy-playbook': deploy_play_book_path,
        '--deploy-values': deploy_values_path,
        '--subcloud-password': HostLinuxUser.get_password()
    }
    opt_args = ''
    for key, val in args_dict.items():
        if val is not None:
            opt_args += '{} {} '.format(key, val)

    rc, output = cli.dcmanager('subcloud ' + operation, opt_args,
                               ssh_client=system_controller_node.ssh_conn,
                               fail_ok=fail_ok, auth_info=auth_info,
                               source_openrc=source_openrc)
    if rc != 0:
        return _fail("Fail to add subcloud {}: {}".format(subcloud, output))

    return rc, output
import time

import pexpect
from pexpect import pxssh
from pytest import fail

from consts.auth import HostLinuxUser
from utils.clients import ssh
from utils.clients.ssh import SSHClient
from utils.clients.ssh import SSHFromSSH
from utils.tis_log import LOG

# Credentials and target host for the ad-hoc connection tests below.
username = HostLinuxUser.get_user()
password = HostLinuxUser.get_password()

hostname = '10.10.10.3'
#hostname = '128.224.150.73'
# hostname = 'yow-cgcs-ironpass-1.wrs.com'


def setup():
    """Open a module-global ssh session to the lab and source openrc.

    Side effect: binds the connected client to the module-global
    ``ssh_client`` used by the tests below.
    """
    global ssh_client
    ssh_client = SSHClient(host=hostname, user=username, password=password)
    ssh_client.connect()
    ssh_client.send("source /etc/platform/openrc")
    # Sourcing openrc changes the shell prompt to the admin prompt, so the
    # client's expected prompt must be updated before the next expect().
    ssh_client.prompt = ssh.ADMIN_PROMPT
    ssh_client.expect()


def test_reconnect_after_swact():
    # NOTE(review): this function appears truncated in this source chunk;
    # the remainder of its body lies beyond the visible range.
    LOG.tc_func_start()
    setup()