def process():
    """Blacklist the CRB repository when its optional counterpart is disabled."""
    blacklisted_repoids = _get_disabled_optional_repo()
    if not blacklisted_repoids:
        return

    api.current_logger().info(
        "The optional repository is not enabled. Blacklisting the CRB repository."
    )
    api.produce(RepositoriesBlacklisted(repoids=blacklisted_repoids))

    summary_text = (
        "The following repositories are not supported by "
        "Red Hat and are excluded from the list of repositories "
        "used during the upgrade.\n- {}".format("\n- ".join(blacklisted_repoids))
    )
    reporting.create_report([
        reporting.Title("Excluded RHEL 8 repositories"),
        reporting.Summary(summary_text),
        reporting.Severity(reporting.Severity.INFO),
        reporting.Tags([reporting.Tags.REPOSITORY]),
        reporting.Flags([reporting.Flags.FAILURE]),
        reporting.ExternalLink(
            url=("https://access.redhat.com/documentation/en-us/"
                 "red_hat_enterprise_linux/8/html/package_manifest/"
                 "codereadylinuxbuilder-repository."),
            title="CodeReady Linux Builder repository",
        ),
    ])
# Example 2
def test_actor_messaging_paths(leapp_forked, repository, actor_name):  # noqa; pylint: disable=unused-argument
    """Check consume/produce/report_error work through both the Actor and the api."""
    messaging = _TestableMessaging()
    with _with_loaded_actor(repository, actor_name,
                            messaging) as (_unused, actor):
        messaging.feed(ApiTestConsume(data='prefilled'), actor)

        # Both access paths must see the prefilled message.
        for consumer in (actor, api):
            assert len(list(consumer.consume(ApiTestConsume))) == 1
            assert next(consumer.consume(ApiTestConsume)).data == 'prefilled'

        # Both access paths must be able to produce messages.
        for producer, label in ((actor, 'Actor'), (api, 'API')):
            message = 'Actor {} sent message via {}'.format(actor_name, label)
            producer.produce(ApiTestProduce(data=message))
            assert messaging.produced.pop().data == message

        # Error reporting works through both access paths as well.
        api.report_error("api error report", details={'source': 'api'})
        assert messaging.errors.pop().message.startswith("api ")
        actor.report_error("actor error report", details={'source': 'actor'})
        assert messaging.errors.pop().message.startswith("actor ")
# Example 3
def process():
    """Produce InstalledTargetKernelVersion for the first installed RHEL 8 kernel."""
    installed_kernels = stdlib.run(["rpm", "-q", "kernel"], split=True)["stdout"]
    for nevra in installed_kernels:
        # NEVRA is "kernel-<version-release.arch>"; keep everything after the name.
        kernel_version = nevra.split("-", 1)[1]
        if "el8" in kernel_version:
            api.produce(InstalledTargetKernelVersion(version=kernel_version))
            break
# Example 4
def scan_repositories(path):
    """Parse the repository map CSV at *path* and produce a RepositoriesMap msg."""
    if not os.path.isfile(path):
        inhibit_upgrade('Repositories map file not found ({})'.format(path))

    if os.path.getsize(path) == 0:
        inhibit_upgrade('Repositories map file is invalid ({})'.format(path))

    repositories = []
    with open(path) as repomap_file:
        rows = csv.reader(repomap_file)
        next(rows)  # skip header

        for record in rows:
            # silently skip malformed records; a valid row has exactly 6 fields
            if len(record) != 6:
                continue

            try:
                repositories.append(RepositoryMap(
                    from_id=record[0],
                    to_id=record[1],
                    from_minor_version=record[2],
                    to_minor_version=record[3],
                    arch=record[4],
                    repo_type=record[5],
                ))
            except ModelViolationError as err:
                inhibit_upgrade(
                    'Repositories map file is invalid ({})'.format(err))

    if not repositories:
        inhibit_upgrade('Repositories map file is invalid ({})'.format(path))

    api.produce(RepositoriesMap(repositories=repositories))
# Example 5
def process():
    """Produce an InstalledRPM message describing every installed package."""
    installed = rpms.get_installed_rpms()
    pkg_repos = get_package_repository_data()
    rpm_streams = map_modular_rpms_to_modules()

    result = InstalledRPM()
    for line in installed:
        line = line.strip()
        if not line:
            continue
        # Pipe-separated fields as emitted by the rpm query.
        name, version, release, epoch, packager, arch, pgpsig = line.split('|')
        module, stream = rpm_streams.get((name, epoch, version, release, arch),
                                         (None, None))
        result.items.append(RPM(
            name=name,
            version=version,
            epoch=epoch,
            packager=packager,
            arch=arch,
            release=release,
            pgpsig=pgpsig,
            repository=pkg_repos.get(name, ''),
            module=module,
            stream=stream))
    api.produce(result)
# Example 6
def check_xfs():
    """Produce an XFSPresence message based on the consumed StorageInfo."""
    storage_info_msgs = api.consume(StorageInfo)
    storage_info = next(storage_info_msgs, None)

    if list(storage_info_msgs):
        api.current_logger().warning(
            'Unexpectedly received more than one StorageInfo message.')
    if not storage_info:
        raise StopActorExecutionError(
            'Could not check if XFS is in use.',
            details={'details': 'Did not receive a StorageInfo message'})

    # Union of XFS mountpoints discovered via fstab, mount and systemd-mount.
    mountpoints = (check_xfs_fstab(storage_info.fstab)
                   | check_xfs_mount(storage_info.mount)
                   | check_xfs_systemdmount(storage_info.systemdmount))

    presence = XFSPresence()
    # By now, we only care about XFS without ftype in use for /var (or /).
    found_without_ftype = False
    for mountpoint in ('/var', '/'):
        if mountpoint in mountpoints:
            presence.present = True
            if is_xfs_without_ftype(mountpoint):
                found_without_ftype = True

    presence.without_ftype = found_without_ftype
    api.produce(presence)
# Example 7
def produce_copy_to_target_task():
    """
    Produce a task to copy multipath configuration files into the target userspace.

    The multipath configuration files are needed when the upgrade init ramdisk
    is generated, to ensure we can boot into the upgrade environment and start
    the upgrade process. The message announces that these files/dirs will be
    available when the upgrade init ramdisk is generated.

    See TargetUserSpaceUpgradeTasks and UpgradeInitramfsTasks for more info.
    """
    # TODO(pstodulk): move the function to the multipathconfcheck actor and get
    # rid of the hardcoded paths. The current behaviour looks the same to the
    # user as before; the proper fix is deferred to a follow-up PR to keep this
    # one simple. Regarding xDR devices: we lack full information, but per
    # discussion with Ben Marzinski the multipath dracut module includes the
    # xDR utils, so they are handled the same way (xdrgetuid / xdrgetinfo are
    # currently missing in our initramfs).
    candidate_paths = ('/etc/multipath.conf', '/etc/multipath', '/etc/xdrdevices.conf')
    copy_files = [CopyFile(src=path) for path in candidate_paths if os.path.exists(path)]

    if copy_files:
        api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files))
# Example 8
def process_events(events):
    """Process PES Events and generate the corresponding Leapp messages."""
    to_install = {}
    to_remove = {}

    # These event actions do not cause a package removal.
    non_removal_actions = ('Present', 'Deprecated', 'Moved')
    for event in events:
        to_install.update(event.out_pkgs)
        if event.in_pkgs and event.action not in non_removal_actions:
            to_remove.update(event.in_pkgs)

    filter_by_repositories(to_install)
    map_repositories(to_install)

    install_pkgs = set(to_install)
    remove_pkgs = set(to_remove)

    # Packages scheduled on both sides cancel each other out.
    overlap = install_pkgs & remove_pkgs
    install_pkgs -= overlap
    remove_pkgs -= overlap

    if install_pkgs or remove_pkgs:
        api.produce(RpmTransactionTasks(to_install=list(install_pkgs),
                                        to_remove=list(remove_pkgs)))

    repos_to_enable = set(to_install.values())
    if repos_to_enable:
        api.produce(RepositoriesSetupTasks(to_enable=list(repos_to_enable)))
def process():
    """
    Produce CustomTargetRepository msgs for the custom repo file if the file
    exists.

    One CustomTargetRepository msg is produced for every repository inside
    the <CUSTOM_REPO_PATH> file.
    """
    if not os.path.isfile(CUSTOM_REPO_PATH):
        api.current_logger().debug(
            "The {} file doesn't exist. Nothing to do.".format(
                CUSTOM_REPO_PATH))
        return

    api.current_logger().info("The {} file exists.".format(CUSTOM_REPO_PATH))
    repofile = repofileutils.parse_repofile(CUSTOM_REPO_PATH)
    if not repofile.data:
        return

    api.produce(CustomTargetRepositoryFile(file=CUSTOM_REPO_PATH))
    for repo in repofile.data:
        api.produce(CustomTargetRepository(
            repoid=repo.repoid,
            name=repo.name,
            baseurl=repo.baseurl,
            enabled=repo.enabled,
        ))
# Example 10
def produce_restricted_pcis():
    """
    Produce RestrictedPCIDevice message from the online or offline sources.

    The data sources preference order is the following:
    1. We try to get the data from the /etc/leapp/files
    2. We try to get the data from the only web service

    :raises StopActorExecutionError: when the fetched data is not valid JSON
        or does not have the expected structure.
    """
    unsupported_driver_names = {"devices": {}}
    unsupported_pci_ids = {"devices": {}}
    try:
        unsupported_driver_names = fetch.read_or_fetch(UNSUPPORTED_DRIVER_NAMES_FILE)
        unsupported_pci_ids = fetch.read_or_fetch(UNSUPPORTED_PCI_IDS_FILE)
        # NOTE: json.loads() no longer accepts an "encoding" argument (it was
        # ignored since Py3 and removed in Python 3.9, where passing it raises
        # TypeError); the payload is already a decoded string here.
        unsupported_driver_names = json.loads(unsupported_driver_names)
        unsupported_pci_ids = json.loads(unsupported_pci_ids)
    except (JSONDecodeError, UnicodeDecodeError):
        raise StopActorExecutionError("The required files have invalid JSON format and can't be decoded.")

    # trying to produce the message from received data
    try:
        api.produce(RestrictedPCIDevices.create({
                    "driver_names": tuple(unsupported_driver_names["devices"].values()),
                    "pci_ids": tuple(unsupported_pci_ids["devices"].values())}))
    # bad data format
    except (KeyError, AttributeError, TypeError, ModelViolationError):
        raise StopActorExecutionError(
            "Can't produce RestrictedPCIDevices message. The data are incompatible.")
def produce_messages(tasks):
    """Produce PESRpmTransactionTasks and RepositoriesSetupTasks from *tasks*."""
    # sorted() materializes the dict views, keeping Py2 & Py3 behaviour aligned
    # (on Py3 keys()/values() return views, not lists).
    install_entries = sorted(tasks[Task.INSTALL].keys())
    remove_entries = sorted(tasks[Task.REMOVE].keys())
    repos_to_enable = sorted(
        set(tasks[Task.INSTALL].values()) | set(tasks[Task.KEEP].values()))

    if install_entries or remove_entries:
        modules_to_reset = _get_enabled_modules()
        # Entry format: (pkg_name, (module_name, module_stream) or falsy).
        modules_to_enable = [Module(name=entry[1][0], stream=entry[1][1])
                             for entry in install_entries if entry[1]]

        api.produce(PESRpmTransactionTasks(
            to_install=[entry[0] for entry in install_entries],
            to_remove=[entry[0] for entry in remove_entries],
            modules_to_enable=modules_to_enable,
            modules_to_reset=modules_to_reset))

    if repos_to_enable:
        api.produce(RepositoriesSetupTasks(to_enable=repos_to_enable))
# Example 12
def process():
    """Detect the installed target kernel and produce InstalledTargetKernelVersion."""
    # TODO: should we take care about stuff of kernel-rt and kernel at the same
    # time when both are present? Or just one? Currently only one of them is
    # handled during the upgrade; kernel-rt has higher priority when the
    # original system was realtime.
    if is_rhel_realtime():
        rt_version = _get_kernel_version('kernel-rt')
        if rt_version:
            api.produce(InstalledTargetKernelVersion(version=rt_version))
            return
        api.current_logger().warning(
            'The kernel-rt rpm from RHEL 8 has not been detected. Switching to non-preemptive kernel.'
        )
        # TODO: create report with instructions to install kernel-rt manually
        # (attach a link to an article if any); this possibly happens just when
        # the repository with kernel-rt is not enabled during the upgrade.

    # standard (non-preemptive) kernel
    std_version = _get_kernel_version('kernel')
    if std_version:
        api.produce(InstalledTargetKernelVersion(version=std_version))
    else:
        # Very unexpected: at least one kernel has to be installed always.
        # Actors consuming InstalledTargetKernelVersion will crash without the
        # message; keep the original behaviour here, but log the error.
        api.current_logger().error('Cannot detect any kernel RPM')
# Example 13
def process():
    """Blacklist the CRB repository when the optional repository is disabled."""
    blacklisted_repoids = _get_disabled_optional_repo()
    if not blacklisted_repoids:
        return
    api.current_logger().info(
        "The optional repository is not enabled. Blacklisting the CRB repository."
    )
    api.produce(RepositoriesBlacklisted(repoids=blacklisted_repoids))
# Example 14
def process():
    """Produce a CPUInfo message, including the machine type when detectable."""
    cpuinfo = CPUInfo()

    found = (RE_MACHINE_TYPE.findall(line) for line in _get_cpuinfo())
    machine_types = [match[0] for match in found if match]
    if machine_types:
        # The machine type should be identical for every CPU found.
        cpuinfo.machine_type = int(machine_types[0])
    api.produce(cpuinfo)
def process():
    """
    Detect NIC renames between the RHEL 7 system and the upgrade initramfs.

    Produces RenamedInterfaces and InitrdIncludes messages.
    """
    rhel7_ifaces = next(api.consume(PersistentNetNamesFacts)).interfaces
    rhel8_ifaces = next(api.consume(PersistentNetNamesFactsInitramfs)).interfaces

    rhel7_by_mac = {iface.mac: iface for iface in rhel7_ifaces}
    rhel8_by_mac = {iface.mac: iface for iface in rhel8_ifaces}

    initrd_files = []
    missing_ifaces = []
    renamed_interfaces = []

    if rhel7_ifaces != rhel8_ifaces:
        for iface in rhel7_ifaces:
            rhel7_name = rhel7_by_mac[iface.mac].name
            rhel8_iface = rhel8_by_mac.get(iface.mac)
            if rhel8_iface is None:
                # The device was not found inside the upgrade environment.
                missing_ifaces.append(iface)
                api.current_logger().warning(
                    'The device with MAC "{}" is not detected in the upgrade'
                    ' environment. Required driver: "{}".'
                    ' Original interface name: "{}".'
                    .format(iface.mac, iface.driver, iface.name)
                )
                continue
            rhel8_name = rhel8_iface.name

            if rhel7_name == rhel8_name:
                continue
            api.current_logger().warning('Detected interface rename {} -> {}.'.format(rhel7_name, rhel8_name))

            if re.search('eth[0-9]+', iface.name) is not None:
                api.current_logger().warning('Interface named using eth prefix, refusing to generate link file')
                renamed_interfaces.append(RenamedInterface(**{'rhel7_name': rhel7_name,
                                                              'rhel8_name': rhel8_name}))
                continue

            initrd_files.append(generate_link_file(iface))

    if missing_ifaces:
        # Note(pstodulk): this usually happens when required (RHEL 8
        # compatible) drivers are not included in the upgrade initramfs.
        # More information can be added later; currently better instructions
        # for users need at least: a) networking working in the upgrade
        # initramfs (PR #583), b) the ability to influence the upgrade
        # initramfs (PR #517).
        # TODO(pstodulk): gen report msg
        api.current_logger().warning(
            'Some network devices have not been detected inside the'
            ' upgrade environment and so related network interfaces'
            ' could be renamed on the upgraded system.'
        )

    api.produce(RenamedInterfaces(renamed=renamed_interfaces))
    api.produce(InitrdIncludes(files=initrd_files))
# Example 16
def scan_source_boot_loader_configuration():
    """
    Scan the boot loader configuration.

    Produces :class:`SourceBootLoaderConfiguration` for other actors to act upon.
    """
    # Fix: the original docstring had an unterminated :class:`...` role.
    boot_loader_configuration = SourceBootLoaderConfiguration(
        entries=scan_boot_entries())

    api.produce(boot_loader_configuration)
def process():
    """Produce RpmTransactionTasks listing any bundled *.rpm files."""
    location = api.get_folder_path('bundled-rpms')
    # It is important to use the realpath of the files here: symlinks cannot
    # be resolved properly inside the target userspace since it uses the
    # /installroot mount target.
    local_rpms = [
        os.path.realpath(os.path.join(location, name))
        for name in os.listdir(location)
        if name.endswith('.rpm')
    ]
    if local_rpms:
        api.produce(RpmTransactionTasks(local_rpms=local_rpms))
# Example 18
def produce_messages(tasks):
    """
    Produce PES transaction and repository setup messages from *tasks*.

    :param tasks: mapping with 'to_install', 'to_remove' and 'to_keep' dicts
        of {package_name: repository}.
    """
    # Cast explicitly to be Py2&Py3 compatible: on Py3, keys()/values() return
    # views, and two dict_values objects cannot be concatenated with `+`
    # (TypeError), so merge the repositories via set union instead.
    to_install_pkgs = sorted(tasks['to_install'].keys())
    to_remove_pkgs = sorted(tasks['to_remove'].keys())
    to_enable_repos = sorted(
        set(tasks['to_install'].values()) | set(tasks['to_keep'].values()))

    if to_install_pkgs or to_remove_pkgs:
        api.produce(PESRpmTransactionTasks(to_install=to_install_pkgs,
                                           to_remove=to_remove_pkgs))

    if to_enable_repos:
        api.produce(RepositoriesSetupTasks(to_enable=to_enable_repos))
# Example 19
def produce_messages(to_install, to_remove):
    """Produce PES transaction tasks and repository setup messages."""
    install_pkgs = set(to_install)
    remove_pkgs = set(to_remove)
    enable_repos = set(to_install.values())

    if install_pkgs or remove_pkgs:
        api.produce(PESRpmTransactionTasks(to_install=list(install_pkgs),
                                           to_remove=list(remove_pkgs)))

    if enable_repos:
        api.produce(RepositoriesSetupTasks(to_enable=list(enable_repos)))
# Example 20
def copy_boot_files(userspace_dir):
    """
    Copy the upgrade kernel and initramfs from *userspace_dir* into /boot and
    produce a BootContent message with their final locations.
    """
    kernel = 'vmlinuz-upgrade.x86_64'
    initram = 'initramfs-upgrade.x86_64.img'
    content = BootContent(
        kernel_path=os.path.join('/boot', kernel),
        initram_path=os.path.join('/boot', initram)
    )

    artifacts_dir = os.path.join(userspace_dir, 'artifacts')
    run(['cp', '-a', os.path.join(artifacts_dir, kernel), content.kernel_path])
    run(['cp', '-a', os.path.join(artifacts_dir, initram), content.initram_path])

    api.produce(content)
# Example 21
def create_report(entries):
    """
    Build the final report message from *entries* and produce it.
    """
    report = {}
    _sanitize_entries(entries)
    for report_entry in entries:
        report_entry.apply(report)
    produce(Report(report=report))
def scan_repositories(read_repofile_func=_read_repofile):
    """
    Scan the repository mapping file and produce RepositoriesMap msg.

    See the description of the actor for more details.
    """
    expected_src_prod_type = config.get_product_type('source')
    expected_dst_prod_type = config.get_product_type('target')
    current_arch = api.current_actor().configuration.architecture

    repositories = []
    # The header line is skipped; line numbers start at 1 for the first data line.
    for line_num, line in enumerate(read_repofile_func(REPOMAP_FILE)[1:], start=1):
        # skip empty lines and comments
        if not line or line.startswith('#'):
            continue

        try:
            (from_repoid, to_repoid, to_pes_repo, from_minor_version,
             to_minor_version, arch, repo_type, src_prod_type,
             dst_prod_type) = line.split(',')

            # filter out records irrelevant for this run
            if (arch != current_arch
                    or expected_src_prod_type != src_prod_type
                    or expected_dst_prod_type != dst_prod_type):
                continue

            repositories.append(
                RepositoryMap(
                    from_repoid=from_repoid,
                    to_repoid=to_repoid,
                    to_pes_repo=to_pes_repo,
                    from_minor_version=from_minor_version,
                    to_minor_version=to_minor_version,
                    arch=arch,
                    repo_type=repo_type,
                ))
        except (ModelViolationError, ValueError) as err:
            _inhibit_upgrade(
                'The repository mapping file is invalid, offending line number: {} ({}).'
                ' It is possible the file is out of date.'.format(
                    line_num, err))

    if not repositories:
        _inhibit_upgrade(
            'The repository mapping file is invalid. Could not find any repository mapping record.'
        )

    api.produce(RepositoriesMap(repositories=repositories))
# Example 23
def process():
    """On s390x, schedule the DASD configuration file for the upgrade initramfs."""
    if not architecture.matches_architecture(architecture.ARCH_S390X):
        return
    if not os.path.isfile(DASD_CONF):
        api.current_logger().warning(
            "The {} file has not been discovered. DASD not used?"
            .format(DASD_CONF)
        )
        return
    # The file has to be copied into the targetuserspace container first, then
    # it can be included into the initramfs ==> both messages are needed.
    api.produce(TargetUserSpaceUpgradeTasks(copy_files=[CopyFile(src=DASD_CONF)]))
    api.produce(UpgradeInitramfsTasks(include_files=[DASD_CONF]))
def scan_files_to_copy():
    """
    Scan the source system for files that should be copied into the target
    userspace.

    Produces a TargetUserSpacePreupgradeTasks message carrying a CopyFile
    entry for every file found.
    """
    files_to_copy = [
        CopyFile(src=src_path, dst=dst_path)
        for src_path, dst_path in FILES_TO_COPY_IF_PRESENT.items()
        if os.path.isfile(src_path)
    ]

    api.produce(TargetUserSpacePreupgradeTasks(copy_files=files_to_copy))
# Example 25
def copy_boot_files(context):
    """
    Copy the generated initramfs and the corresponding kernel from the
    container to /boot, and produce a BootContent message with their locations.
    """
    kernel = 'vmlinuz-upgrade.x86_64'
    initram = 'initramfs-upgrade.x86_64.img'
    content = BootContent(kernel_path=os.path.join('/boot', kernel),
                          initram_path=os.path.join('/boot', initram))

    for artifact, destination in ((kernel, content.kernel_path),
                                  (initram, content.initram_path)):
        context.copy_from(os.path.join('/artifacts', artifact), destination)

    api.produce(content)
# Example 26
def get_grub_device():
    """
    Produce GrubDevice for the block device where GRUB is located.

    We assume GRUB is on the same device as the /boot partition. The
    LEAPP_GRUB_DEVICE environment variable overrides the detection.
    """
    override = os.getenv('LEAPP_GRUB_DEVICE', None)
    if override:
        api.produce(GrubDevice(grub_device=override))
        return
    grub_dev = blk_dev_from_partition(get_boot_partition())
    if grub_dev and has_grub(grub_dev):
        api.produce(GrubDevice(grub_device=grub_dev))
# Example 27
def copy_boot_files(context):
    """
    Copy the generated initramfs and the corresponding kernel for the current
    architecture to /boot, and produce a BootContent message with their
    locations.
    """
    arch = api.current_actor().configuration.architecture
    kernel = 'vmlinuz-upgrade.{}'.format(arch)
    initram = 'initramfs-upgrade.{}.img'.format(arch)
    content = BootContent(kernel_path=os.path.join('/boot', kernel),
                          initram_path=os.path.join('/boot', initram))

    for artifact, destination in ((kernel, content.kernel_path),
                                  (initram, content.initram_path)):
        context.copy_from(os.path.join('/artifacts', artifact), destination)

    api.produce(content)
# Example 28
def scan_repositories(path):
    """Parse the repository mapping CSV at *path* and produce RepositoriesMap."""
    if not os.path.isfile(path):
        inhibit_upgrade('Repositories map file not found ({})'.format(path))

    if os.path.getsize(path) == 0:
        inhibit_upgrade('Repositories map file is invalid ({})'.format(path))

    current_arch = api.current_actor().configuration.architecture
    repositories = []
    with open(path) as repomap_file:
        reader = csv.reader(repomap_file)
        next(reader)  # skip header

        for row in reader:
            # skip empty lines and comments
            if not row or row[0].startswith('#'):
                continue

            try:
                (from_repoid, to_repoid, to_pes_repo, from_minor_version,
                 to_minor_version, arch, repo_type) = row
            except ValueError as err:
                inhibit_upgrade('Repositories map file is invalid, offending line number: {} ({})'.format(
                    reader.line_num, err))

            # only records matching the current architecture are relevant
            if arch != current_arch:
                continue

            try:
                repositories.append(RepositoryMap(
                    from_repoid=from_repoid,
                    to_repoid=to_repoid,
                    to_pes_repo=to_pes_repo,
                    from_minor_version=from_minor_version,
                    to_minor_version=to_minor_version,
                    arch=arch,
                    repo_type=repo_type,
                ))
            except ModelViolationError as err:
                inhibit_upgrade('Repositories map file is invalid, offending line number: {} ({})'.format(
                    reader.line_num, err))

    if not repositories:
        inhibit_upgrade('Repositories map file is invalid ({})'.format(path))

    api.produce(RepositoriesMap(repositories=repositories))
def scan_repositories(read_repofile_func=_read_repofile):
    """
    Scan the repository mapping file and produce the RepositoriesMapping msg.

    See the description of the actor for more details.
    """
    # TODO: add filter based on the current arch
    # TODO: deprecate the product type and introduce the "channels" ?.. more or less
    # NOTE: product type is changed, now it's channel: eus,e4s,aus,tus,ga,beta

    if os.path.exists(os.path.join('/etc/leapp/files', OLD_REPOMAP_FILE)):
        # NOTE: what about creating the report (instead of warning)
        api.current_logger().warning(
            'The old repomap file /etc/leapp/files/repomap.csv is present.'
            ' The file has been replaced by the repomap.json file and it is'
            ' not used anymore.')

    json_data = read_repofile_func(REPOMAP_FILE)
    try:
        repomap_data = RepoMapData.load_from_dict(json_data)
        source_major = get_source_major_version()
        target_major = get_target_major_version()
        mapping = repomap_data.get_mappings(source_major, target_major)
        repositories = repomap_data.get_repositories([source_major, target_major])
        api.produce(RepositoriesMapping(mapping=mapping, repositories=repositories))
    except ModelViolationError as err:
        _inhibit_upgrade(
            'The repository mapping file is invalid: '
            'the JSON does not match required schema (wrong field type/value): {}'
            .format(err))
    except KeyError as err:
        _inhibit_upgrade(
            'The repository mapping file is invalid: the JSON is missing a required field: {}'
            .format(err))
    except ValueError as err:
        # The error should contain enough information, so we do not need to clarify it further
        _inhibit_upgrade(
            'The repository mapping file is invalid: {}'.format(err))
# Example 30
def report(title=None,
           detail=None,
           renderers=None,
           audience=None,
           flags=None,
           severity=None):
    """
    Create and produce a report entry

    For more information about the arguments, please refer to the :class:`Report` model

    :param title: Title of the report message
    :type title: str
    :param detail: Report message details
    :type detail: dict
    :param renderers: Available report entry renderers (e.g. html / plaintext)
    :type renderers: dict
    :param audience: Target audience of the report
    :type audience: list
    :param flags: Functionality flags (e.g. inhibitor)
    :type flags: list
    :param severity: Report severity
    :type severity: str
    :raises ValueError: when the detail structure contains a "title" key
    """

    # set some healthy defaults
    audience = audience or ['sysadmin']
    severity = severity or 'medium'
    flags = flags or []
    # Fix: detail defaults to None, and `'title' in None` raised TypeError;
    # default it to an empty dict like the other arguments.
    detail = detail or {}

    # validate before building anything so a bad call fails fast
    if 'title' in detail:
        raise ValueError(
            'Key "title" cannot be present in the report "detail" structure')

    report_entry = {
        'title': title,
        'detail': detail,
        'renderers': Renderers(**renderers),
        'severity': severity,
        'flags': flags,
        'audience': audience,
    }

    produce(Report(**report_entry))