def process(configs=None):
    """
    Append collected kernel cmdline arguments to the installed target kernel via grubby.

    :param configs: optional list of grub config file paths; when given, the full
        set of KernelCmdlineArg messages is applied once per config file (passed
        to grubby via ``-c``). When None/empty, grubby runs once without ``-c``.
    :raises StopActorExecutionError: when grubby (or zipl on s390x) fails.
    """
    kernel_version = next(api.consume(InstalledTargetKernelVersion), None)
    # Without knowledge of the installed target kernel there is nothing to update.
    if kernel_version:
        while True:
            config = []
            if configs:
                # Consume one config path per outer iteration; the loop below
                # re-applies all cmdline args against this particular config.
                config = ['-c', configs.pop(0)]
            # NOTE: api.consume returns a fresh iterator on each call, so every
            # outer iteration sees the full set of KernelCmdlineArg messages.
            for arg in api.consume(KernelCmdlineArg):
                cmd = ['grubby',
                       '--update-kernel=/boot/vmlinuz-{}'.format(kernel_version.version),
                       '--args={}={}'.format(arg.key, arg.value)]
                cmd += config
                try:
                    stdlib.run(cmd)
                    if architecture.matches_architecture(architecture.ARCH_S390X):
                        # on s390x we need to call zipl explicitly because of issue in grubby,
                        # otherwise the entry is not updated in the ZIPL bootloader
                        # See https://bugzilla.redhat.com/show_bug.cgi?id=1764306
                        stdlib.run(['/usr/sbin/zipl'])
                except (OSError, stdlib.CalledProcessError) as e:
                    raise StopActorExecutionError(
                        "Failed to append extra arguments to kernel command line.",
                        details={"details": str(e)})
            # Loop until every provided config file has been processed; with no
            # configs given this exits after the single pass above.
            if not configs:
                break
def test_actor_messaging_paths(leapp_forked, repository, actor_name):  # noqa; pylint: disable=unused-argument
    """
    Verify that message consumption, production and error reporting work
    identically through both the actor instance and the module-level api.
    """
    messaging = _TestableMessaging()
    with _with_loaded_actor(repository, actor_name, messaging) as (_unused, actor):
        # Pre-feed one message and check it is visible via both consume paths.
        # NOTE: each consume() call returns a fresh iterator, so the len()
        # check and the next() check below each see the full message set.
        messaging.feed(ApiTestConsume(data='prefilled'), actor)
        assert len(list(actor.consume(ApiTestConsume))) == 1
        assert next(actor.consume(ApiTestConsume)).data == 'prefilled'
        assert len(list(api.consume(ApiTestConsume))) == 1
        assert next(api.consume(ApiTestConsume)).data == 'prefilled'

        actor_message = 'Actor {} sent message via Actor'.format(actor_name)
        api_message = 'Actor {} sent message via API'.format(actor_name)

        # Production through the actor and through the api must both land in
        # the test messaging backend.
        actor.produce(ApiTestProduce(data=actor_message))
        assert messaging.produced.pop().data == actor_message
        api.produce(ApiTestProduce(data=api_message))
        assert messaging.produced.pop().data == api_message

        # Error reporting through both paths must be recorded as well.
        api.report_error("api error report", details={'source': 'api'})
        assert messaging.errors.pop().message.startswith("api ")
        actor.report_error("actor error report", details={'source': 'actor'})
        assert messaging.errors.pop().message.startswith("actor ")
def checkpcidrivers_main():
    """
    Main entrypoint of the CheckPCIDrivers Actor.

    Consumes PCIDevices and RestrictedPCIDevices, intersects the host's
    drivers/PCI ids with the restricted sets, and either inhibits the upgrade
    (device unavailable on RHEL 8) or warns (device unsupported on RHEL 8).

    :raises StopActorExecutionError: when a required message is missing or the
        restricted-device lookups cannot be built.
    """
    try:
        pci_devs = next(api.consume(PCIDevices))
        restricted_pci_devs = next(api.consume(RestrictedPCIDevices))
    except StopIteration:
        raise StopActorExecutionError(message=(
            "At least one of the needed messages is empty. "
            "Required messages are PCIDevices, RestrictedPCIDevices."))
    else:
        # get set of drivers-names/pci-ids presented on host
        driver_names_on_host = set(dev.driver for dev in pci_devs.devices)
        pci_ids_on_host = set(dev.pci_id for dev in pci_devs.devices)
        # get restricted driver_names and pci_ids dict lookups
        try:
            restricted_devices_drivers = create_dict_lookup(
                restricted_pci_devs.driver_names, key="driver_name")
            restricted_devices_pcis = create_dict_lookup(
                restricted_pci_devs.pci_ids, key="pci_id")
        except (AttributeError, ValueError) as err:
            raise StopActorExecutionError(message=str(err))
        # get set of restricted driver-names/pci-ids presented on host
        restricted_driver_names_on_host = driver_names_on_host & set(
            restricted_devices_drivers)
        restricted_pci_ids_on_host = pci_ids_on_host & set(
            restricted_devices_pcis)
        # check if at least one driver on host is not available on RHEL8
        if any(restricted_devices_drivers[driver_name].available_rhel8 == 0
               for driver_name in restricted_driver_names_on_host) or any(
                   restricted_devices_pcis[pci].available_rhel8 == 0
                   for pci in restricted_pci_ids_on_host):
            api.current_logger().critical(
                "Some of the host drivers are unavailable on the RHEL 8 "
                "system. Inhibiting the upgrade...",
            )
            create_report(
                render_report(
                    restricted_driver_names_on_host,
                    restricted_pci_ids_on_host,
                    restricted_devices_drivers,
                    restricted_devices_pcis,
                    inhibit_upgrade=True,
                ))
        # check if at least one driver on host is not supported on RHEL8
        elif any(restricted_devices_drivers[driver_name].supported_rhel8 == 0
                 for driver_name in restricted_driver_names_on_host) or any(
                     restricted_devices_pcis[pci].supported_rhel8 == 0
                     for pci in restricted_pci_ids_on_host):
            api.current_logger().warning(
                "Some of the host drivers are unsupported on the RHEL 8 "
                "system. Warning the user...",
            )
            create_report(
                render_report(
                    restricted_driver_names_on_host,
                    restricted_pci_ids_on_host,
                    restricted_devices_drivers,
                    restricted_devices_pcis,
                ))
def _install_initram_deps(packages):
    """
    Install the given package set into the target userspace via the dnf plugin.

    :param packages: package names required for the upgrade init ramdisk
    """
    userspace_info = next(api.consume(TargetUserSpaceInfo), None)
    repos_in_use = api.consume(UsedTargetRepositories)
    dnfplugin.install_initramdisk_requirements(
        packages=packages,
        target_userspace_info=userspace_info,
        used_repos=repos_in_use)
def perform():
    """
    Prepare the target userspace container for the upgrade.

    Gathers required packages, validates RHSM and storage information, builds
    a source overlay, switches to the target product certificate, resolves the
    target repositories and finally produces UsedTargetRepositories,
    TargetRHSMInfo and TargetUserSpaceInfo messages.

    :raises StopActorExecutionError: when no enabled target repositories exist.
    """
    packages = {'dnf'}
    for message in api.consume(RequiredTargetUserspacePackages):
        packages.update(message.packages)

    rhsm_info = next(api.consume(SourceRHSMInfo), None)
    if not rhsm_info and not rhsm.skip_rhsm():
        api.current_logger().warn(
            'Could not receive RHSM information - Is this system registered?')
        return

    xfs_info = next(api.consume(XFSPresence), XFSPresence())
    storage_info = next(api.consume(StorageInfo), None)
    if not storage_info:
        # FIX: the original called api.current_logger.error(...) without
        # invoking current_logger(), which raised AttributeError, and then
        # fell through with storage_info=None despite logging "cannot
        # proceed". Log properly and bail out, consistent with the RHSM
        # early-return above.
        api.current_logger().error('No storage info available cannot proceed.')
        return

    prod_cert_path = _get_product_certificate_path()
    with overlaygen.create_source_overlay(mounts_dir=constants.MOUNTS_DIR,
                                          scratch_dir=constants.SCRATCH_DIR,
                                          storage_info=storage_info,
                                          xfs_info=xfs_info) as overlay:
        with overlay.nspawn() as context:
            target_version = api.current_actor().configuration.version.target
            with rhsm.switched_certificate(context, rhsm_info, prod_cert_path,
                                           target_version) as target_rhsm_info:
                api.current_logger().debug(
                    'Target RHSM Info: SKUs: {skus} Repositories: {repos}'.format(
                        repos=target_rhsm_info.enabled_repos,
                        skus=rhsm_info.attached_skus if rhsm_info else []))
                target_repoids = gather_target_repositories(target_rhsm_info)
                api.current_logger().debug(
                    "Gathered target repositories: {}".format(', '.join(target_repoids)))
                if not target_repoids:
                    raise StopActorExecutionError(
                        message='There are no enabled target repositories for the upgrade process to proceed.',
                        details={
                            'hint': ('Ensure your system is correctly registered with the subscription manager and that'
                                     ' your current subscription is entitled to install the requested target version {version}'
                                     ).format(version=api.current_actor().configuration.version.target)
                        })
                prepare_target_userspace(context, constants.TARGET_USERSPACE,
                                         target_repoids, list(packages))
                _prep_repository_access(context, constants.TARGET_USERSPACE)
                dnfplugin.install(constants.TARGET_USERSPACE)
                api.produce(UsedTargetRepositories(repos=[
                    UsedTargetRepository(repoid=repo) for repo in target_repoids
                ]))
                api.produce(target_rhsm_info)
                api.produce(TargetUserSpaceInfo(path=constants.TARGET_USERSPACE,
                                                scratch=constants.SCRATCH_DIR,
                                                mounts=constants.MOUNTS_DIR))
def process():
    """Run the releasever handling when DNF releasever is set or RHUI is in use."""
    pkg_manager_info = next(api.consume(PkgManagerInfo), None)
    rhui_info = next(api.consume(RHUIInfo), None)

    releasever_defined = bool(pkg_manager_info) and pkg_manager_info.etc_releasever is not None
    if releasever_defined or rhui_info:
        handle_etc_releasever()
        return

    api.current_logger().debug(
        'Skipping execution. "releasever" is not set in DNF/YUM vars directory and no RHUIInfo has '
        'been produced')
def process():
    """
    Compare interface names between the source system and the upgrade
    environment, produce RenamedInterfaces for eth-prefixed renames and
    InitrdIncludes link files for the rest; warn about devices missing in
    the upgrade environment.
    """
    rhel7_ifaces = next(api.consume(PersistentNetNamesFacts)).interfaces
    rhel8_ifaces = next(api.consume(PersistentNetNamesFactsInitramfs)).interfaces

    # Index both sides by MAC address so interfaces can be matched across envs.
    rhel7_ifaces_map = {iface.mac: iface for iface in rhel7_ifaces}
    rhel8_ifaces_map = {iface.mac: iface for iface in rhel8_ifaces}

    initrd_files = []
    missing_ifaces = []
    renamed_interfaces = []

    if rhel7_ifaces != rhel8_ifaces:
        for iface in rhel7_ifaces:
            rhel7_name = rhel7_ifaces_map[iface.mac].name
            try:
                rhel8_name = rhel8_ifaces_map[iface.mac].name
            except KeyError:
                # Device exists on the source system but was not detected in
                # the upgrade environment (e.g. its driver is missing there).
                missing_ifaces.append(iface)
                api.current_logger().warning(
                    'The device with MAC "{}" is not detected in the upgrade'
                    ' environment. Required driver: "{}".'
                    ' Original interface name: "{}".'
                    .format(iface.mac, iface.driver, iface.name)
                )
                continue
            if rhel7_name != rhel8_name:
                api.current_logger().warning('Detected interface rename {} -> {}.'.format(rhel7_name, rhel8_name))
                if re.search('eth[0-9]+', iface.name) is not None:
                    # eth-prefixed names are recorded as renames instead of
                    # pinning them with a .link file.
                    api.current_logger().warning('Interface named using eth prefix, refusing to generate link file')
                    renamed_interfaces.append(RenamedInterface(**{'rhel7_name': rhel7_name,
                                                                  'rhel8_name': rhel8_name}))
                    continue
                initrd_files.append(generate_link_file(iface))

    if missing_ifaces:
        msg = (
            'Some network devices have not been detected inside the'
            ' upgrade environment and so related network interfaces'
            ' could be renamed on the upgraded system.'
        )
        # Note(pstodulk):
        # This usually happens when required (RHEL 8 compatible)
        # drivers are not included in the upgrade initramfs.
        # We can add more information later. Currently we cannot provide
        # better instructions for users before (at least):
        # a) networking work in the upgrade initramfs (PR #583)
        # b) it's possible to influence the upgrade initramfs (PR #517)
        # TODO(pstodulk): gen report msg
        api.current_logger().warning(msg)

    api.produce(RenamedInterfaces(renamed=renamed_interfaces))
    api.produce(InitrdIncludes(files=initrd_files))
def install_initram_deps(context):
    """
    Install packages required by the upgrade init ramdisk into the target userspace.

    :param context: kept for interface compatibility; not used by this function.
    """
    required = set()
    for msg in api.consume(RequiredUpgradeInitramPackages):
        required.update(msg.packages)

    userspace_info = next(api.consume(TargetUserSpaceInfo), None)
    repos_in_use = api.consume(UsedTargetRepositories)
    dnfplugin.install_initramdisk_requirements(
        packages=required,
        target_userspace_info=userspace_info,
        used_repos=repos_in_use)
def process():
    """Set the DNF/YUM releasever variable to the target version when applicable."""
    target_version = api.current_actor().configuration.version.target
    pkg_manager_info = next(api.consume(PkgManagerInfo), None)
    rhui_info = next(api.consume(RHUIInfo), None)

    releasever_defined = bool(pkg_manager_info) and pkg_manager_info.etc_releasever is not None
    if releasever_defined or rhui_info:
        # if "/etc/dnf/vars/releasever" file exists, or we are using RHUI, let's set it to our
        # target version.
        _set_releasever(target_version)
        return

    api.current_logger().debug(
        'Skipping execution. "releasever" is not set in DNF/YUM vars directory and no RHUIInfo has '
        'been produced')
def process():
    """
    Append each consumed KernelCmdlineArg to the installed target kernel's
    command line via grubby.

    :raises StopActorExecutionError: when the grubby invocation fails.
    """
    kernel_info = next(api.consume(InstalledTargetKernelVersion), None)
    if not kernel_info:
        return

    kernel_path = '/boot/vmlinuz-{}'.format(kernel_info.version)
    for arg in api.consume(KernelCmdlineArg):
        grubby_cmd = [
            'grubby',
            '--update-kernel={}'.format(kernel_path),
            '--args={}={}'.format(arg.key, arg.value),
        ]
        try:
            stdlib.run(grubby_cmd)
        except (OSError, stdlib.CalledProcessError) as exc:
            raise StopActorExecutionError(
                "Failed to append extra arguments to kernel command line.",
                details={"details": str(exc)})
def remove_boot_entry():
    """
    Remove the leapp upgrade kernel entry from the bootloader.

    Mounts /boot (and /boot/efi on EFI systems) first, removes the entry via
    grubby, runs zipl on s390x, and finally remounts everything from fstab.

    :raises StopActorExecutionError: when no FirmwareFacts message is available.
    """
    # we need to make sure /boot is mounted before trying to remove the boot entry
    firmware_facts = next(api.consume(FirmwareFacts), None)
    if not firmware_facts:
        raise StopActorExecutionError(
            'Could not identify system firmware',
            details={'details': 'Actor did not receive FirmwareFacts message.'})

    firmware_mounts = {'bios': ['/boot'], 'efi': ['/boot', '/boot/efi']}
    for mountpoint in firmware_mounts[firmware_facts.firmware]:
        try:
            run(['/bin/mount', mountpoint])
        except CalledProcessError:
            # partitions have been most likely already mounted
            pass

    run(['/usr/sbin/grubby',
         '--remove-kernel={0}'.format(get_upgrade_kernel_filepath())])
    if architecture.matches_architecture(architecture.ARCH_S390X):
        # on s390x we need to call zipl explicitly because of issue in grubby,
        # otherwise the new boot entry will not be set as default
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1764306
        run(['/usr/sbin/zipl'])

    # TODO: Move calling `mount -a` to a separate actor as it is not really related to removing the upgrade boot entry.
    # It's worth to call it after removing the boot entry to avoid boot loop in case mounting fails.
    run(['/bin/mount', '-a'])
def gather_target_repositories(target_rhsm_info):
    """
    Performs basic checks on requirements for RHSM repositories and returns the
    list of target repository ids to use during the upgrade.
    """
    # FIXME: check that required repo IDs (baseos, appstream)
    # + or check that all required RHEL repo IDs are available.
    if not rhsm.skip_rhsm():
        available = target_rhsm_info.available_repos
        if not available or len(available) < 2:
            raise StopActorExecutionError(
                message='Cannot find required basic RHEL repositories.',
                details={
                    'hint': ('It is required to have RHEL repository on the system'
                             ' provided by the subscription-manager. Possibly you'
                             ' are missing a valid SKU for the target system or network'
                             ' connection failed. Check whether your system is attached'
                             ' to the valid SKU providing target repositories.')
                })

    target_repoids = []
    for target_repo in api.consume(TargetRepositories):
        # Keep only the RHEL repos that RHSM actually reports as available.
        target_repoids.extend(
            rhel_repo.repoid for rhel_repo in target_repo.rhel_repos
            if rhel_repo.repoid in target_rhsm_info.available_repos)
        # TODO: complete processing of custom repositories
        # HINT: now it will work only for custom repos that exist
        # + already on the system in a repo file
        # TODO: should check available_target_repoids + additional custom repos
        # + outside of rhsm..
        # #if custom_repo.repoid in available_target_repoids:
        target_repoids.extend(
            custom_repo.repoid for custom_repo in target_repo.custom_repos)
    return target_repoids
def get_pkgs(pkg_name):
    """
    Get all installed packages of the given name signed by Red Hat.
    """
    rpm_msg = next(api.consume(InstalledRedHatSignedRPM), InstalledRedHatSignedRPM())
    matching = []
    for pkg in rpm_msg.items:
        if pkg.name == pkg_name:
            matching.append(pkg)
    return matching
def perform_update():
    """Enable SCTP as soon as any consumed SCTPConfig message requests it."""
    for config in api.consume(SCTPConfig):
        api.current_logger().info('Consuming sctp={}'.format(config.wanted))
        if not config.wanted:
            continue
        enable_sctp()
        break
def _get_facts(model):
    """
    Consumes input data model

    :param class model: name of model which we consume
    :return: the first consumed message of the model, or None when none exists
    """
    for fact in api.consume(model):
        return fact
    return None
def _get_storage_data():
    """
    Return the consumed StorageInfo message after validating its fstab data.

    :raises StopActorExecutionError: when the message or its fstab data is missing.
    """
    storage_msg = next(api.consume(StorageInfo), None)
    if storage_msg is None or not storage_msg:
        raise StopActorExecutionError('The StorageInfo message is not available.')
    if not storage_msg.fstab:
        raise StopActorExecutionError('Data from the /etc/fstab file is missing.')
    return storage_msg
def remove_boot_entry():
    """
    Remove the leapp upgrade kernel entry from the bootloader via grubby.

    Mounts /boot (and /boot/efi on EFI systems) beforehand and remounts
    everything from fstab afterwards.

    :raises StopActorExecutionError: when no FirmwareFacts message is available.
    """
    # we need to make sure /boot is mounted before trying to remove the boot entry
    firmware_facts = next(api.consume(FirmwareFacts), None)
    if not firmware_facts:
        raise StopActorExecutionError(
            'Could not identify system firmware',
            details={'details': 'Actor did not receive FirmwareFacts message.'})

    firmware_mounts = {'bios': ['/boot'], 'efi': ['/boot', '/boot/efi']}
    for mountpoint in firmware_mounts[firmware_facts.firmware]:
        try:
            run(['/bin/mount', mountpoint])
        except CalledProcessError:
            # partitions have been most likely already mounted
            pass

    run(['/usr/sbin/grubby',
         '--remove-kernel={0}'.format(get_upgrade_kernel_filepath())])

    # TODO: Move calling `mount -a` to a separate actor as it is not really related to removing the upgrade boot entry.
    # It's worth to call it after removing the boot entry to avoid boot loop in case mounting fails.
    run(['/bin/mount', '-a'])
def generate_initram_disk(context):
    """
    Function to actually execute the init ramdisk creation.

    Includes handling of specified dracut modules from the host when needed.
    The check for the 'conflicting' dracut modules is in a separate actor.

    :param context: container/chroot context providing the call() method used
        to run the generation script inside the target userspace.
    """
    env = {}
    if get_target_major_version() == '9':
        # NOTE(review): presumably disables systemd's seccomp filtering for the
        # generation run on RHEL 9 targets — confirm against the generator script.
        env = {'SYSTEMD_SECCOMP': '0'}

    # TODO(pstodulk): Add possibility to add particular drivers
    # Issue #645
    modules = _get_dracut_modules()  # deprecated
    files = set()
    # Collect additional modules/files requested by other actors.
    for task in api.consume(UpgradeInitramfsTasks):
        modules.extend(task.include_dracut_modules)
        files.update(task.include_files)

    copy_dracut_modules(context, modules)
    # The module list, architecture and file list are handed to the in-container
    # generation script through environment variables on a shell command line.
    # FIXME: issue #376
    context.call([
        '/bin/sh', '-c',
        'LEAPP_ADD_DRACUT_MODULES="{modules}" LEAPP_KERNEL_ARCH={arch} '
        'LEAPP_DRACUT_INSTALL_FILES="{files}" {cmd}'.format(
            modules=','.join([mod.name for mod in modules]),
            arch=api.current_actor().configuration.architecture,
            files=' '.join(files),
            cmd=os.path.join('/', INITRAM_GEN_SCRIPT_NAME))
    ], env=env)
    copy_boot_files(context)
def get_installed_pkgs():
    """
    Get installed Red Hat-signed packages.

    :return: Set of names of the installed Red Hat-signed packages
    :raises StopActorExecutionError: when no InstalledRedHatSignedRPM message
        was received.
    """
    installed_pkgs = set()

    installed_rh_signed_rpm_msgs = api.consume(InstalledRedHatSignedRPM)
    installed_rh_signed_rpm_msg = next(installed_rh_signed_rpm_msgs, None)
    # Deliberate trick: after next() took the first message, anything left in
    # the same iterator means more than one message was produced.
    if list(installed_rh_signed_rpm_msgs):
        api.current_logger().warning(
            'Unexpectedly received more than one InstalledRedHatSignedRPM message.'
        )
    if not installed_rh_signed_rpm_msg:
        raise StopActorExecutionError(
            'Cannot parse PES data properly due to missing list of installed packages',
            details={
                'Problem': 'Did not receive a message with installed Red Hat-signed '
                           'packages (InstalledRedHatSignedRPM)'
            })

    installed_pkgs.update(
        [pkg.name for pkg in installed_rh_signed_rpm_msg.items])
    return installed_pkgs
def _get_input_model(model):
    """
    Gets data model from an actor.

    :param obj model: object of model which data will be consumed
    :return: the first consumed message, or None when no message exists
    """
    consumed = list(api.consume(model))
    return consumed[0] if consumed else None
def set_rhsm_release():
    """Set the RHSM release to the target RHEL 8 minor version."""
    info = next(api.consume(TargetRHSMInfo), None)
    if not (info and info.release):
        api.current_logger().debug('Skipping setting the RHSM release due to the use of LEAPP_DEVEL_SKIP_RHSM.')
        return
    rhsm.set_release(mounting.NotIsolatedActions(base_dir='/'), info.release)
def _is_crb_used():
    """
    Return True when the CRB repository is among the used target repositories.
    """
    # the UsedTargetRepositories has to be set always, by design of IPU
    # NOTE(review): if that guarantee were ever broken, used_repos would be
    # None and the attribute access below would raise AttributeError — this
    # code intentionally relies on the design guarantee above.
    used_repos = next(api.consume(UsedTargetRepositories), None)
    for repo in used_repos.repos:
        if repo.repoid == CRB_REPOID:
            return True
    return False
def process():
    """
    Regenerate the target initramfs with additional files and dracut modules
    requested by other actors; no-op when nothing extra is required.

    :raises StopActorExecutionError: when the installed target kernel version
        is unknown or dracut fails.
    """
    files = _get_files()
    modules = _get_modules()
    if not files and not modules:
        api.current_logger().debug(
            'No additional files or modules required to add into the target initramfs.')
        return
    target_kernel = next(api.consume(InstalledTargetKernelVersion), None)
    if not target_kernel:
        raise StopActorExecutionError(
            'Cannot get version of the installed RHEL-8 kernel',
            details={'Problem': 'Did not receive a message with installed RHEL-8 kernel version'
                     ' (InstalledTargetKernelVersion)'})
    copy_dracut_modules(modules)
    try:
        # multiple files|modules need to be quoted, see --install | --add in dracut(8)
        # NOTE(review): run() appears to be exec-style (argument list, no shell),
        # so the literal double quotes added below become part of the argument
        # handed to dracut — confirm this is what dracut expects here.
        module_names = list({module.name for module in modules})
        cmd = ['dracut', '-f', '--kver', target_kernel.version]
        if files:
            cmd += ['--install', '"{}"'.format(' '.join(files))]
        if modules:
            cmd += ['--add', '"{}"'.format(' '.join(module_names))]
        run(cmd)
    except CalledProcessError as e:
        # just hypothetic check, it should not die
        raise StopActorExecutionError('Cannot regenerate dracut image.',
                                      details={'details': str(e)})
def check_dialogs(inhibit_if_no_userchoice=True):
    """
    Create a report for every dialog whose answerfile sections are missing
    user choices; optionally flag the reports as upgrade inhibitors.

    :param inhibit_if_no_userchoice: mark the reports with the INHIBITOR flag
    """
    # These texts do not depend on the particular dialog, so build them once.
    summary_tpl = (
        'One or more sections in answerfile are missing user choices: {}\n'
        'For more information consult https://leapp.readthedocs.io/en/latest/dialogs.html'
    )
    remediation_hint = (
        'Please register user choices with leapp answer cli command or by manually editing '
        'the answerfile.')

    for dialog in api.consume(DialogModel):
        sections = dialog.answerfile_sections
        # FIXME: Enable more choices once we can do multi-command remediations
        remediation_cmds = [
            ['leapp', 'answer', '--section', "{}={}".format(section, choice)]
            for section, choices in sections.items()
            for choice in choices[:1]
        ]
        entries = [
            reporting.Title('Missing required answers in the answer file'),
            reporting.Severity(reporting.Severity.HIGH),
            reporting.Summary(summary_tpl.format('\n'.join(sections))),
            reporting.Flags([reporting.Flags.INHIBITOR] if inhibit_if_no_userchoice else []),
            reporting.Remediation(hint=remediation_hint, commands=remediation_cmds),
            reporting.Key(dialog.key),
        ]
        entries += [reporting.RelatedResource('dialog', section) for section in sections]
        reporting.create_report(entries)
def check():
    """Raise a framework error for every consumed report flagged as an inhibitor."""
    for msg in api.consume(Report):
        if 'inhibitor' in msg.report.get('flags', []):
            api.report_error(msg.report['title'])
def process():
    """
    On s390x, check whether the machine (CPU) type is supported on the target
    system and create an inhibitor report when it is not; no-op elsewhere.

    :raises StopActorExecutionError: when the CPUInfo message is missing.
    """
    if not architecture.matches_architecture(architecture.ARCH_S390X):
        return
    cpuinfo = next(api.consume(CPUInfo), None)
    if cpuinfo is None:
        raise StopActorExecutionError(message=("Missing information about CPU."))

    if not cpuinfo.machine_type:
        # this is not expected to happen, but in case...
        # FIX: the original called api.curernt_logger() (typo), which raised
        # AttributeError here instead of logging the warning.
        api.current_logger().warning("The machine (CPU) type is empty.")

    if cpuinfo.machine_type not in SUPPORTED_MACHINE_TYPES:
        # FIX: corrected the user-facing typo "Red Had" -> "Red Hat".
        summary = ("The machine is not possible to upgrade because of unsupported"
                   " type of the processor. Regarding the official documentation,"
                   " z13 and z14 processors are supported on the Red Hat Enterprise"
                   " Linux 8 system for the IBM Z architecture. If you have one of"
                   " the supported processors, you should see provided the machine"
                   " type in the /proc/cpuinfo file with one of those values: {}."
                   " Detected machine type of the CPU is '{}'."
                   .format(", ".join([str(i) for i in SUPPORTED_MACHINE_TYPES]),
                           cpuinfo.machine_type))
        report = [
            reporting.Title("The processor is not supported by the target system."),
            reporting.Summary(summary),
            reporting.Severity(reporting.Severity.HIGH),
            reporting.Tags([reporting.Tags.SANITY]),
            reporting.Flags([reporting.Flags.INHIBITOR]),
            reporting.ExternalLink(
                title="Considerations in adopting RHEL 8",
                url=("https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/"
                     "html-single/considerations_in_adopting_rhel_8/"
                     "index#changes-in-gcc-in-rhel-8_changes-in-toolchain-since-rhel-7"))
        ]
        reporting.create_report(report)
def check_xfs():
    """
    Inspect the consumed StorageInfo and produce an XFSPresence message
    describing whether XFS (and XFS without ftype) is in use on / or /var.

    :raises StopActorExecutionError: when no StorageInfo message was received.
    """
    storage_msgs = api.consume(StorageInfo)
    storage_info = next(storage_msgs, None)
    # Anything left in the iterator means more than one message was produced.
    if list(storage_msgs):
        api.current_logger().warning(
            'Unexpectedly received more than one StorageInfo message.')
    if not storage_info:
        raise StopActorExecutionError(
            'Could not check if XFS is in use.',
            details={'details': 'Did not receive a StorageInfo message'})

    mountpoints = (check_xfs_fstab(storage_info.fstab)
                   | check_xfs_mount(storage_info.mount)
                   | check_xfs_systemdmount(storage_info.systemdmount))

    xfs_presence = XFSPresence()
    # By now, we only care for XFS without ftype in use for /var
    ftype_missing = False
    for mp in ('/var', '/'):
        if mp not in mountpoints:
            continue
        xfs_presence.present = True
        if is_xfs_without_ftype(mp):
            ftype_missing = True
    xfs_presence.without_ftype = ftype_missing
    api.produce(xfs_presence)
def gather_target_repositories(context): """ Perform basic checks on requirements for RHSM repositories and return the list of target repository ids to use during the upgrade. :param context: An instance of a mounting.IsolatedActions class :type context: mounting.IsolatedActions class :return: List of target system repoids :rtype: List(string) """ # Get the RHSM repos available in the RHEL 8 container available_repos = rhsm.get_available_repo_ids( context, releasever=api.current_actor().configuration.version.target) # FIXME: check that required repo IDs (baseos, appstream) # + or check that all required RHEL repo IDs are available. if not rhsm.skip_rhsm(): if not available_repos or len(available_repos) < 2: raise StopActorExecutionError( message='Cannot find required basic RHEL 8 repositories.', details={ 'hint': ('It is required to have RHEL repositories on the system' ' provided by the subscription-manager. Possibly you' ' are missing a valid SKU for the target system or network' ' connection failed. Check whether your system is attached' ' to a valid SKU providing RHEL 8 repositories.') }) target_repoids = [] for target_repo in api.consume(TargetRepositories): for rhel_repo in target_repo.rhel_repos: if rhel_repo.repoid in available_repos: target_repoids.append(rhel_repo.repoid) else: # TODO: We shall report that the RHEL repos that we deem necessary for the upgrade are not available. # The StopActorExecutionError called above might be moved here. pass for custom_repo in target_repo.custom_repos: # TODO: complete processing of custom repositories # HINT: now it will work only for custom repos that exist # + already on the system in a repo file # TODO: should check available_target_repoids + additional custom repos # + outside of rhsm.. 
# #if custom_repo.repoid in available_target_repoids: target_repoids.append(custom_repo.repoid) api.current_logger().debug("Gathered target repositories: {}".format( ', '.join(target_repoids))) if not target_repoids: raise StopActorExecutionError( message= 'There are no enabled target repositories for the upgrade process to proceed.', details={ 'hint': ('Ensure your system is correctly registered with the subscription manager and that' ' your current subscription is entitled to install the requested target version {version}' ).format( version=api.current_actor().configuration.version.target) }) return target_repoids
def _get_repositories_mapping():
    """
    Get all repositories mapped from repomap file and map repositories id with respective names.

    :return: Dictionary with all repositories mapped.
    :raises StopActorExecutionError: when no RepositoriesMap message was received.
    """
    repo_map_msgs = api.consume(RepositoriesMap)
    repo_map_msg = next(repo_map_msgs, None)
    # Anything remaining in the iterator means more than one message arrived.
    if list(repo_map_msgs):
        api.current_logger().warning(
            'Unexpectedly received more than one RepositoriesMap message.')
    if not repo_map_msg:
        raise StopActorExecutionError(
            'Cannot parse RepositoriesMap data properly',
            details={
                'Problem': 'Did not receive a message with mapped repositories'
            })

    current_arch = api.current_actor().configuration.architecture
    return {
        repository.to_pes_repo: repository.to_repoid
        for repository in repo_map_msg.repositories
        if repository.arch == current_arch
    }
def get_installed_pkgs():
    """
    Get installed Red Hat-signed packages.

    :return: A set of tuples holding installed Red Hat-signed package names and their module streams
    :raises StopActorExecutionError: when no InstalledRedHatSignedRPM message was received.
    """
    rpm_msgs = api.consume(InstalledRedHatSignedRPM)
    rpm_msg = next(rpm_msgs, None)
    # Anything remaining in the iterator means more than one message arrived.
    if list(rpm_msgs):
        api.current_logger().warning(
            'Unexpectedly received more than one InstalledRedHatSignedRPM message.'
        )
    if not rpm_msg:
        raise StopActorExecutionError(
            'Cannot parse PES data properly due to missing list of installed packages',
            details={
                'Problem': 'Did not receive a message with installed Red Hat-signed '
                           'packages (InstalledRedHatSignedRPM)'
            })

    installed_pkgs = set()
    for pkg in rpm_msg.items:
        # Packages without a complete module/stream pair get None as stream info.
        modulestream = (pkg.module, pkg.stream) if pkg.module and pkg.stream else None
        installed_pkgs.add((pkg.name, modulestream))
    return installed_pkgs