def virtwho_install(ssh, url=None):
    """
    Install the virt-who package on the test host.

    Installs from the configured repository by default, or from a brew
    url when one is provided.
    :param ssh: ssh access of virt-who host
    :param url: url link of virt-who package from brew
    :return: the installed virt-who package nvr
    :raises FailException: when virt-who is not installed afterwards.
    """
    # Pre-install preparation depends on the rhel major version;
    # anything not listed gets the rpm-db rebuild sequence.
    prepare_by_version = {
        '6': 'dbus-uuidgen > /var/lib/dbus/machine-id',
        '8': 'localectl set-locale en_US.utf8; source /etc/profile.d/lang.sh',
    }
    default_prepare = ('rm -rf /var/lib/rpm/__db*;'
                       'mv /var/lib/rpm /var/lib/rpm.old;'
                       'rpm --initdb;'
                       'rm -rf /var/lib/rpm;'
                       'mv /var/lib/rpm.old /var/lib/rpm;'
                       'rm -rf /var/lib/yum/history/*.sqlite;'
                       'rpm -v --rebuilddb')
    rhel_ver = base.rhel_version(ssh)
    ssh.runcmd(prepare_by_version.get(rhel_ver, default_prepare))
    if url:
        virtwho_install_by_url(ssh, url)
    else:
        ssh.runcmd('yum remove -y virt-who;'
                   'yum install -y virt-who')
    _, output = ssh.runcmd('rpm -qa virt-who')
    if 'virt-who' not in output:
        raise FailException('Failed to install virt-who package')
    package = output.strip()
    logger.info(f'Succeeded to install {package}')
    return package
def associate(self, hypervisor, guest):
    """
    Check on the web that the hypervisor and guest reference each other.
    :param guest: guest name
    :param hypervisor: hypervisor host name/uuid/hwuuid
    :raises FailException: when either association is missing.
    """
    host_id = self.host_id(host=hypervisor)
    guest_id = self.host_id(host=guest)
    if host_id and guest_id:
        # (page host id, name expected on that page, message template)
        checks = (
            (host_id, guest,
             '{} to find the associated guest in hypervisor page'),
            (guest_id, hypervisor,
             '{} to find the associated hypervisor in guest page'),
        )
        for page_id, expected, template in checks:
            _, output = request_get(
                url=f'{self.api}/api/v2/hosts/{page_id}',
                auth=self.auth)
            if expected.lower() in str(output):
                logger.info(template.format('Succeeded'))
            else:
                raise FailException(template.format('Failed'))
def attach(self, pool=None, quantity=None):
    """
    Attach subscription by Pool ID or --auto.
    :param pool: Pool ID, attach by --auto when pool=None
    :param quantity: subscription number to attach, default is auto.
    :return: tty output.
    :raises FailException: on any unrecognized attach failure.
    """
    cmd = 'subscription-manager attach '
    if pool:
        cmd += f'--pool={pool} '
    if quantity:
        # Fixed: the trailing space was missing, producing the broken
        # argument '--quantity=N--auto' when pool was None.
        cmd += f'--quantity={quantity} '
    if not pool:
        cmd += '--auto '
    self.refresh()
    ret, output = self.ssh.runcmd(cmd)
    if ret == 0:
        logger.info(f'Succeeded to attach subscription for {self.host}')
        return output.strip()
    # Tolerated failures: auto-attach finding nothing, and pools that
    # do not support multi-entitlement.
    if '--auto' in cmd and 'Unable to find available' in output:
        logger.warning(
            f'Failed to attach subscription by auto for {self.host}.')
        return output.strip()
    if 'Multi-entitlement not supported' in output:
        logger.warning(output)
        return output.strip()
    raise FailException(f'Failed to attach subscription for {self.host}')
def satellite_deploy(args):
    """
    Deploy a satellite server from cdn or dogfood with required
    arguments. Please refer to the README for usage.
    """
    version = args.version
    repo = args.repo
    # args.rhel_compose looks like 'RHEL-X.Y...'; keep the major number.
    rhel_major = args.rhel_compose.split('-')[1].split('.')[0]
    ssh_conn = SSHConnect(
        host=args.server, user=args.ssh_username, pwd=args.ssh_password)
    system_init(ssh_conn, 'satellite')
    # Enable the repositories the installer pulls packages from.
    if 'cdn' in repo:
        subscription = SubscriptionManager(
            host=args.server,
            username=args.ssh_username,
            password=args.ssh_password,
            register_type='rhsm')
        satellite_repo_enable_cdn(subscription, rhel_major, version)
    if 'dogfood' in repo:
        satellite_repo_enable_dogfood(ssh_conn, rhel_major, version)
    # Install and configure satellite itself.
    satellite_pkg_install(ssh_conn)
    satellite_installer(ssh_conn, args.admin_password)
    # Upload the manifest when one is provided.
    if args.manifest:
        satellite_manifest_upload(
            ssh_conn, args.manifest, args.admin_username,
            args.admin_password)
    logger.info(f'Succeeded to deploy satellite ({version})')
def log_analyzer(job_id):
    """
    Analyse the log file got from polarion.
    :param job_id: polarion job id
    :raises FailException: when the log cannot be fetched, or when any
        test case failed to upload.
    """
    ret, _ = subprocess.getstatusoutput(
        f'curl -k '
        f'-u {args.username}:{args.password} '
        f'-X GET {args.url}-log?jobId={job_id} > {args.log_file}')
    if ret != 0:
        raise FailException('Failed to get the polarion job log')
    _, output = subprocess.getstatusoutput(f'cat {args.log_file}')
    # NOTE(review): the previous .replace('"', '\"') style calls were
    # removed — '\"' equals '"' in a regular string, so they were no-ops.
    # The payload is the json object after the 'Message Content:' marker.
    log = json.loads(
        output[output.rfind('Message Content:') + 17:
               output.rfind('}') + 1],
        strict=False)
    log_url = log['log-url']
    case_list = log['import-testcases']
    case_pass_num = sum(
        1 for case in case_list if case['status'] == 'passed')
    case_fail_num = len(case_list) - case_pass_num
    case_total_num = len(case_list)
    logger.info(f'Total uploading case number: {case_total_num}\n'
                f'Passed uploading case number: {case_pass_num}\n'
                f'Failed uploading case number: {case_fail_num}\n'
                f'Log URL: {log_url}')
    if case_fail_num > 0:
        raise FailException('Failed to upload all test cases to polarion')
def install_rhel_by_beaker(args):
    """
    Install rhel os by submitting a job to beaker with required
    arguments. Please refer to the utils/README for usage.
    :param args: parsed arguments carrying the compose, arch, variant
        and host requirements.
    :return: the hostname of the provisioned machine.
    :raises FailException: when beaker does not return a host.
    """
    job_name = f'virtwho-{args.rhel_compose}'
    ssh_client = SSHConnect(
        host=config.beaker.client,
        user=config.beaker.client_username,
        pwd=config.beaker.client_password
    )
    beaker_client_kinit(
        ssh_client,
        config.beaker.keytab,
        config.beaker.principal
    )
    job_id = beaker_job_submit(
        ssh_client,
        job_name,
        args.rhel_compose,
        args.arch,
        args.variant,
        args.job_group,
        args.host,
        args.host_type,
        args.host_require,
    )
    # Poll until the beaker job leaves the running state.
    while beaker_job_status(ssh_client, job_name, job_id):
        time.sleep(60)
    host = beaker_job_result(ssh_client, job_name, job_id)
    if host:
        logger.info(f'Succeeded to install {args.rhel_compose} by beaker '
                    f'({host})')
        return host
    # Fixed: the message was missing the f-prefix, so the literal text
    # '{args.rhel_compose}' was raised instead of the compose id.
    raise FailException(f'Failed to install {args.rhel_compose} by beaker')
def available(self, sku_id, sku_type='Virtual'):
    """
    Search and analyze an available subscription by name and type.
    :param sku_id: sku id, such as RH00001
    :param sku_type: 'Physical' or 'Virtual'.
    :return: a dict with sku attributes, or None when nothing matches.
    """
    cmd = (f'subscription-manager list --av --all --matches={sku_id} |'
           f'tail -n +4')
    ret, output = self.ssh.runcmd(cmd)
    if ret == 0 and "Pool ID:" in output:
        # Each sku stanza in the listing is separated by a blank line.
        skus = output.strip().split('\n\n')
        for sku in skus:
            sku_attr = self.attr_analyzer(sku)
            # Prefer 'system_type' when present, else 'entitlement_type'.
            if 'system_type' in sku_attr.keys():
                sku_attr['sku_type'] = sku_attr['system_type']
            else:
                sku_attr['sku_type'] = sku_attr['entitlement_type']
            if sku_attr['sku_type'] == sku_type:
                logger.info(f'Succeeded to find {sku_type}:{sku_id} '
                            f'in {self.host}')
                sku_attr['temporary'] = (
                    '(Temporary)' in sku_attr['subscription_type'])
                return sku_attr
    # Fixed: the original passed a set-membership expression
    # (f'...' in {self.host}) to logger.warning, logging True/False
    # instead of the intended message.
    logger.warning(f'Failed to find {sku_type}:{sku_id} in {self.host}')
    return None
def consumed(self, pool):
    """
    List and analyze the consumed subscription by Pool ID.
    :param pool: Pool ID for checking.
    :return: a dict with sku attributes, or None when nothing matches.
    """
    self.refresh()
    ret, output = self.ssh.runcmd('subscription-manager list --co')
    if ret == 0:
        no_pools = (output is None
                    or 'No consumed subscription pools were found'
                    in output)
        if no_pools:
            logger.info(f'No consumed subscription found in {self.host}.')
            return None
        if "Pool ID:" in output:
            # Stanzas in the listing are separated by blank lines.
            for stanza in output.strip().split('\n\n'):
                attrs = self.attr_analyzer(stanza)
                if attrs['pool_id'] != pool:
                    continue
                logger.info(f'Succeeded to get the consumed '
                            f'subscription in {self.host}')
                attrs['temporary'] = (
                    '(Temporary)' in attrs['subscription_type'])
                return attrs
    logger.warning('Failed to get consumed subscriptions.')
    return None
def test_debug(self, virtwho): """Test the '-d' option in virt-who command line :title: virt-who: cli: test option -d :id: 9389396f-d4c3-4be2-8aec-a9f7be3d25f1 :caseimportance: High :tags: tier1 :customerscenario: false :upstream: no :steps: 1. clean all virt-who global configurations 2. run "#virt-who -c" without "-d" 3. run "#virt-who -d -c" :expectedresults: 1. no [DEBUG] log printed without "-d" option 2. [DEBUG] logs are printed with "-d" option """ # result = virtwho.run_cli(debug=False) # assert (result['send'] == 1 # and result['error'] == 0 # and result['debug'] is False) # # result = virtwho.run_cli(debug=True) # assert (result['send'] == 1 # and result['error'] == 0 # and result['debug'] is True) logger.info("Succeeded to run the 'test_debug_by_cli'")
def put_dir(self, local_dir, remote_dir):
    """
    Upload all files from a local directory tree to a remote directory.
    :param local_dir: all files from local path to be uploaded.
    :param remote_dir: a remote path where the uploaded files will be
        placed.
    """
    sftp, conn = self._transfer()
    try:
        for root, dirs, files in os.walk(local_dir):
            for file_name in files:
                local_file = os.path.join(root, file_name)
                # Fixed: str.replace left a leading separator, making
                # os.path.join treat the tail as absolute and discard
                # remote_dir; relpath gives a clean relative path.
                rel_path = os.path.relpath(local_file, local_dir)
                remote_file = os.path.join(remote_dir, rel_path)
                try:
                    sftp.put(local_file, remote_file)
                except Exception:
                    # Parent directory is missing on the remote side;
                    # create it and retry once.
                    sftp.mkdir(os.path.split(remote_file)[0])
                    sftp.put(local_file, remote_file)
            for name in dirs:
                rel_path = os.path.relpath(
                    os.path.join(root, name), local_dir)
                remote_path = os.path.join(remote_dir, rel_path)
                try:
                    sftp.mkdir(remote_path)
                except Exception as e:
                    # Directory may already exist remotely; best-effort.
                    logger.info(e)
    finally:
        # Close the connection even when an upload raises.
        conn.close()
def ssh_connect(ssh):
    """
    Test if the host is running and can be accessed by ssh.
    :param ssh: ssh access of host
    """
    ret, output = ssh.runcmd('rpm -qa filesystem')
    if ret == 0 and 'filesystem' in output:
        # Fixed typo: 'Suceeded' -> 'Succeeded'.
        logger.info('Succeeded to ssh connect the host')
def unregister(self):
    """
    Unregister the host and clean its local subscription data.
    :raises FailException: when subscription-manager reports failure.
    """
    code, _ = self.ssh.runcmd('subscription-manager unregister;'
                              'subscription-manager clean')
    if code != 0:
        raise FailException(f'Failed to unregister {self.host}')
    logger.info('Succeeded to unregister host')
def job_id_get():
    """
    Get the job id of the polarion upload from the log file.
    :return: the job id string.
    :raises FailException: when the job id cannot be read.
    """
    ret, output = subprocess.getstatusoutput(
        "cat %s | awk '{print $4}' | awk '$1=$1'" % args.log_file)
    if ret == 0:
        logger.info(f'Succeeded to get the polarion job id: {output}')
        return output
    # Message changed from 'Fail to get' to 'Failed to get' for
    # consistency with the other failure messages in this file.
    raise FailException('Failed to get polarion job id')
def msg_number(self, output, msg):
    """
    Count how many times a message pattern appears in an output string
    (case-insensitive regex search).
    :param output: output string to search around
    :param msg: message string to be searched
    :return: the message number
    """
    matches = re.findall(msg, output, re.I)
    count = len(matches)
    logger.info(f"Find '{msg}' {count} times")
    return count
def facts_remove(self):
    """
    Remove the custom subscription facts file and refresh the facts.
    :raises FailException: when the removal does not succeed.
    """
    ret, output = self.ssh.runcmd('rm -f /etc/rhsm/facts/custom.facts;'
                                  'subscription-manager facts --update')
    # Allow the fact update time to settle before callers proceed.
    time.sleep(60)
    if ret != 0 or 'Successfully updated' not in output:
        raise FailException(
            f'Failed to remove custom.facts for {self.host}')
    logger.info(f'Succeeded to remove custom.facts for {self.host}')
def refresh(self):
    """
    Refresh subscription by command 'subscription-manager refresh',
    retrying up to 3 times with a 180s pause between attempts.
    :return: True on success.
    :raises FailException: when every attempt fails.
    """
    attempts = 3
    for _ in range(attempts):
        code, _ = self.ssh.runcmd('subscription-manager refresh')
        if code == 0:
            logger.info('Succeeded to refresh subscription')
            return True
        logger.warning('Try again to refresh subscription after 180s...')
        time.sleep(180)
    raise FailException(f'Failed to refresh subscription for {self.host}')
def provision_virtwho_host(args):
    """
    Configure the virt-who host for an existing server or a new one
    installed by beaker. Please refer to the provision/README for usage.
    """
    logger.info("+++ Start to deploy the virt-who host +++")
    if args.gating_msg:
        msg = base.gating_msg_parser(args.gating_msg)
        args.virtwho_pkg_url = msg['pkg_url']
        if not args.rhel_compose:
            args.rhel_compose = msg['latest_rhel_compose']
        config.update('gating', 'package_nvr', msg['pkg_nvr'])
        config.update('gating', 'build_id', msg['build_id'])
        config.update('gating', 'task_id', msg['task_id'])
    # Will deploy a new host by beaker if no server provided
    if not args.server:
        beaker_args_define(args)
        args.server = install_rhel_by_beaker(args)
        args.username = config.beaker.default_username
        args.password = config.beaker.default_password
    ssh_host = SSHConnect(host=args.server,
                          user=args.username,
                          pwd=args.password)
    # Initially set the host
    base.rhel_compose_repo(ssh_host, args.rhel_compose,
                           '/etc/yum.repos.d/compose.repo')
    base.system_init(ssh_host, 'virtwho')
    virtwho_pkg = virtwho_install(ssh_host, args.virtwho_pkg_url)
    # Update the test properties in virtwho.ini
    config.update('job', 'rhel_compose', args.rhel_compose)
    config.update('virtwho', 'server', args.server)
    config.update('virtwho', 'username', args.username)
    config.update('virtwho', 'password', args.password)
    config.update('virtwho', 'package', virtwho_pkg)
    # Configure the virt-who host as mode requirements
    if (config.job.hypervisor == 'libvirt'
            or 'libvirt' in config.job.multi_hypervisors):
        libvirt_access_no_password(ssh_host)
    if (config.job.hypervisor == 'kubevirt'
            or 'kubevirt' in config.job.multi_hypervisors):
        kubevirt_config_file(ssh_host)
    if (config.job.hypervisor == 'local'
            or 'local' in config.job.multi_hypervisors):
        config.update('local', 'server', args.server)
        # NOTE(review): the two updates below target the 'server'
        # section while the line above targets 'local' — confirm
        # whether they should also write to the 'local' section.
        config.update('server', 'username', args.username)
        config.update('server', 'password', args.password)
    # Fixed typo: 'Suceeded' -> 'Succeeded'.
    logger.info(f"+++ Succeeded to deploy the virt-who host "
                f"{args.rhel_compose}/{args.server} +++")
def unattach(self, pool=None):
    """
    Remove one subscription by Pool ID, or all of them.
    :param pool: Pool ID, remove all when pool=None.
    :raises FailException: when the removal fails.
    """
    if pool:
        cmd = f'subscription-manager remove --pool={pool}'
    else:
        cmd = 'subscription-manager remove --all'
    code, _ = self.ssh.runcmd(cmd)
    if code != 0:
        raise FailException(
            f'Failed to remove subscription for {self.host}')
    logger.info(f'Succeeded to remove subscription for {self.host}')
def start(self, q, cli):
    """
    Start virt-who either from the command line or as a service.
    :param q: queue (not read by this method itself)
    :param cli: the command to run virt-who, such as "virt-who -d -o";
        the service is used when no cli is configured.
    """
    if not cli:
        logger.info("Start to run virt-who by service")
        _, output = self.operate_service()
    else:
        logger.info(f"Start to run virt-who by cli: {cli}")
        _, output = self.ssh.runcmd(cli)
def rhsm_log_get(self, wait):
    """
    Poll the rhsm log until one of the expected messages shows up,
    virt-who terminates, or the attempts run out.
    :param wait: wait time before starting analyzing log
    :return: output of rhsm log
    """
    if wait:
        time.sleep(wait)
    rhsm_output = ""
    attempts = 30
    # The check order matters: 429 throttling first, then termination,
    # then errors, then a successful mapping send.
    for attempt in range(attempts):
        time.sleep(15)
        _, rhsm_output = self.ssh.runcmd(f"cat {self.rhsm_log_file}")
        if self.msg_search(rhsm_output, "status=429") is True:
            logger.warning("429 code found when run virt-who")
            break
        if self.thread_number() == 0:
            logger.info("Virt-who is terminated after run once")
            break
        if self.msg_search(rhsm_output, "\\[.*ERROR.*\\]") is True:
            logger.info("Error found when run virt-who")
            break
        if self.send_number(rhsm_output) > 0:
            logger.info("Succeed to send mapping after run virt-who")
            break
        if attempt == attempts - 1:
            logger.info("Timeout when run virt-who")
            break
    return rhsm_output
def uuid(self, host_name):
    """
    Get the consumer uuid by host name.
    :param host_name: host name
    :return: consumer uuid
    :raises FailException: when no consumer is found.
    """
    consumer = self.consumers(host_name)
    if not consumer:
        raise FailException(
            f'Failed to get stage consumer uuid for {host_name}')
    consumer_uuid = consumer['uuid']
    logger.info(f'Succeeded to get stage consumer uuid: '
                f'{host_name}:{consumer_uuid}')
    return consumer_uuid
def info(self, host_name):
    """
    Get the consumer host information by host name, including the
    detail facts info.
    :param host_name: host name
    :return: the consumer info as a dict
    :raises FailException: when the request does not return 200.
    """
    consumer_uuid = self.uuid(host_name)
    status, data = request_get(
        url=f'{self.api}/consumers/{consumer_uuid}', auth=self.auth)
    if status != 200:
        raise FailException(
            f'Failed to get consumer info for {host_name}')
    logger.info(f'Succeeded to get consumer info for {host_name}')
    return data
def beaker_client_kinit(ssh, keytab, principal):
    """
    Initiate the beaker client via kerberos.
    :param ssh: ssh access of client to run command
    :param keytab: jenkins keytab
    :param principal: jenkins principal
    :return: True/False
    """
    code, _ = ssh.runcmd(f'kinit -k -t {keytab} {principal}')
    success = code == 0
    if success:
        logger.info('Succeeded to initiate beaker client')
    else:
        logger.error('Failed to initiate beaker client')
    return success
def settings(self, name, value):
    """
    Update a satellite setting through hammer.
    :param name: such as unregister_delete_host.
    :param value: the value.
    :return: True or raise Fail.
    """
    cmd = (f'hammer settings set '
           f'--name={name} '
           f'--value={value}')
    code, output = self.ssh.runcmd(cmd)
    if code == 0 and f'Setting [{name}] updated to' in output:
        logger.info(f'Succeeded to set {name}:{value} for satellite')
        return True
    raise FailException(f'Failed to set {name}:{value} for satellite')
def xml_file_upload():
    """
    Upload the xml file to Polarion, saving the curl output into the
    log file.
    :raises FailException: when curl exits non-zero.
    """
    upload_cmd = (f'curl -k '
                  f'-u {args.username}:{args.password} '
                  f'-X POST -F file=@{args.xml_file} '
                  f'{args.url} > {args.log_file}')
    logger.info(upload_cmd)
    code, _ = subprocess.getstatusoutput(upload_cmd)
    # Give polarion time to process the submission before moving on.
    time.sleep(60)
    if code != 0:
        raise FailException('Failed the upload step')
    logger.info('Finished the upload step')
def test_http_option(self):
    """Just a demo

    :title: virt-who: esx: test http option
    :id: 37ee22b4-5105-4693-857d-4003715606ef
    :caseimportance: High
    :tags: tier2
    :customerscenario: false
    :upstream: no
    :steps:
        1.
    :expectedresults:
        1.
    """
    # TODO: placeholder test — no steps implemented yet, only logs.
    logger.info("Succeeded to run the 'test_http_option'")
def test_hostname_option(self):
    """Just a demo

    :title: virt-who: esx: test hostname option
    :id: fb1f5dec-89c7-41e7-a15b-52b843f6f590
    :caseimportance: High
    :tags: tier1
    :customerscenario: false
    :upstream: no
    :steps:
        1.
    :expectedresults:
        1.
    """
    # TODO: placeholder test — no steps implemented yet, only logs.
    logger.info("Succeeded to run the 'test_hostname_option'")
def test_upgrade_downgrade_by_yum(self):
    """Just a demo

    :title: VIRT-WHO: test upgrade and downgrade by yum
    :id: 64c09d15-3050-4d73-8d9e-296836c4ac58
    :caseimportance: High
    :tags: tier1
    :customerscenario: false
    :upstream: no
    :steps:
        1.
    :expectedresults:
        1.
    """
    # TODO: placeholder test — no steps implemented yet, only logs.
    logger.info("Succeeded to run the 'test_upgrade_downgrade_by_yum'")
def test_http_option(self):
    """Just a demo

    :title: virt-who: hyperv: test http option
    :id: 52f8f2df-ef7b-48b3-9579-364ea77e8409
    :caseimportance: High
    :tags: tier2
    :customerscenario: false
    :upstream: no
    :steps:
        1.
    :expectedresults:
        1.
    """
    # TODO: placeholder test — no steps implemented yet, only logs.
    logger.info("Succeeded to run the 'test_http_option'")
def test_hostname_option(self):
    """Just a demo

    :title: virt-who: hyperv: test hostname option
    :id: 4ab41c9b-3b74-4987-a73c-cacf2c9601e1
    :caseimportance: High
    :tags: tier1
    :customerscenario: false
    :upstream: no
    :steps:
        1.
    :expectedresults:
        1.
    """
    # TODO: placeholder test — no steps implemented yet, only logs.
    logger.info("Succeeded to run the 'test_hostname_option'")