Example 1
            def wwwpat_matcher(package: Package, package_context: PackageContext, match_context: MatchContext) -> bool:
                if package.links is not None:
                    for link_type, url in package.links:
                        if LinkType.is_relevant_for_rule_matching(link_type) and wwwpat.fullmatch(url):
                            return True

                return False
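
Rough usage sketch (not repology's actual API): a matcher like the one above is typically produced by a factory that has already compiled the `wwwpat` pattern and is then called once per package. The `Package` stand-in and the link-type constants below are simplified assumptions made only to keep the example runnable.

    import re
    from dataclasses import dataclass
    from typing import Callable, List, Optional, Tuple

    # Simplified stand-ins for the real types; names and values are assumptions.
    LINK_HOMEPAGE = 0
    LINK_DOWNLOAD = 1
    RELEVANT_LINK_TYPES = {LINK_HOMEPAGE}

    @dataclass
    class Package:
        links: Optional[List[Tuple[int, str]]] = None

    def make_wwwpat_matcher(wwwpat: re.Pattern) -> Callable[[Package], bool]:
        def wwwpat_matcher(package: Package) -> bool:
            if package.links is not None:
                for link_type, url in package.links:
                    # only rule-relevant link types are tested against the full-match pattern
                    if link_type in RELEVANT_LINK_TYPES and wwwpat.fullmatch(url):
                        return True
            return False
        return wwwpat_matcher

    matcher = make_wwwpat_matcher(re.compile(r'https://github\.com/example/.*'))
    print(matcher(Package(links=[(LINK_HOMEPAGE, 'https://github.com/example/project')])))  # True
    print(matcher(Package(links=[(LINK_DOWNLOAD, 'https://github.com/example/project')])))  # False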
Example 2
        def postprocess_parsed_packages(
                packages_iter: Iterable[PackageMaker]) -> Iterator[Package]:
            for packagemaker in packages_iter:
                try:
                    package = packagemaker.spawn(
                        repo=repository['name'],
                        family=repository['family'],
                        subrepo=source.get('subrepo'),
                        shadow=repository.get('shadow', False),
                        default_maintainer=repository.get(
                            'default_maintainer'),
                    )
                except RuntimeError as e:
                    packagemaker.log(str(e), Logger.ERROR)
                    raise

                # transform
                if transformer:
                    transformer.process(package)

                # skip removed packages
                if package.has_flag(PackageFlags.REMOVE):
                    continue

                # postprocess flavors
                def strip_flavor(flavor: str) -> str:
                    # str.removeprefix returns a new string; it must be returned, not discarded
                    return flavor.removeprefix(package.effname + '-')

                package.flavors = sorted(
                    set(map(strip_flavor, package.flavors)))

                # add packagelinks
                packagelinks: List[Tuple[int, str]] = []
                for pkglink in source.get('packagelinks', []) + repository.get(
                        'packagelinks', []):
                    if 'type' in pkglink:  # XXX: will become mandatory
                        link_type = LinkType.from_string(pkglink['type'])
                        try:
                            packagelinks.extend(
                                (link_type, url)
                                for url in format_package_links(
                                    package, pkglink['url']))
                        except Exception as e:
                            packagemaker.log(
                                f'cannot spawn package link from template "{pkglink["url"]}": {str(e)}',
                                Logger.ERROR)
                            raise

                if package.links is None:
                    package.links = packagelinks
                else:
                    seen = set(package.links)
                    package.links.extend(link for link in packagelinks
                                         if link not in seen)

                yield package
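
Two of the postprocessing steps above are easy to illustrate in isolation: flavor names are stripped of the package's effname prefix, and new package links are appended only if they are not already present. The values below are made up for demonstration.

    # Flavor stripping: str.removeprefix (Python 3.9+) leaves the string unchanged
    # when the prefix is absent, so non-matching flavors pass through untouched.
    effname = 'vim'
    flavors = ['vim-gtk', 'vim-qt', 'vim-gtk']
    print(sorted(set(flavor.removeprefix(effname + '-') for flavor in flavors)))  # ['gtk', 'qt']

    # Link merging: an order-preserving, duplicate-free extend of the existing links.
    existing = [(0, 'https://example.org/project')]
    incoming = [(0, 'https://example.org/project'), (1, 'https://example.org/project.tar.gz')]
    seen = set(existing)
    existing.extend(link for link in incoming if link not in seen)
    print(existing)  # the duplicate homepage link is added only once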
Example 3
            def wwwpart_matcher(package: Package, package_context: PackageContext, match_context: MatchContext) -> bool:
                if package.links is not None:
                    for link_type, url in package.links:
                        lower_url = url.lower()
                        if LinkType.is_relevant_for_rule_matching(link_type):
                            for wwwpart in wwwparts:
                                if wwwpart in lower_url:
                                    return True

                return False
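
Compared with the full-match variant in Example 1, this form lowercases the URL once per link and checks each needle with a plain substring test. A minimal sketch with made-up values:

    wwwparts = ['github.com/example', 'gitlab.com/example']

    def any_wwwpart_matches(url: str) -> bool:
        lower_url = url.lower()
        return any(wwwpart in lower_url for wwwpart in wwwparts)

    print(any_wwwpart_matches('https://GitHub.com/Example/Project'))  # True
    print(any_wwwpart_matches('https://example.org/'))                # False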
Example 4
    def __init__(self, repositories_config: YamlConfig) -> None:
        self._repositories = []
        self._repo_by_name = {}

        # process source loops
        for repodata in repositories_config.get_items():
            extra_groups = set()

            sources = []
            for sourcedata in repodata['sources']:
                if sourcedata.get('disabled', False):
                    continue

                for name in _listify(sourcedata['name']):
                    # if there are multiple names, clone source data for each of them
                    processed_sourcedata = _subst_source_recursively(
                        copy.deepcopy(sourcedata), name)
                    sources.append(
                        Source(
                            name=name,
                            subrepo=processed_sourcedata.get('subrepo'),
                            fetcher=processed_sourcedata['fetcher'],
                            parser=processed_sourcedata['parser'],
                            packagelinks=[
                                PackageLink(type=LinkType.from_string(
                                    linkdata['type']),
                                            url=linkdata['url'])
                                for linkdata in processed_sourcedata.get(
                                    'packagelinks', [])
                            ],
                        ))

                extra_groups.add(sourcedata['fetcher']['class'])
                extra_groups.add(sourcedata['parser']['class'])

            repo = Repository(
                name=repodata['name'],
                sortname=repodata.get('sortname', repodata['name']),
                singular=repodata.get('singular',
                                      repodata['desc'] + ' package'),
                type=repodata.get('type', 'repository'),
                desc=repodata['desc'],
                statsgroup=repodata.get('statsgroup', repodata['desc']),
                family=repodata['family'],
                ruleset=_listify(repodata.get('ruleset', repodata['family'])),
                color=repodata.get('color'),
                valid_till=repodata.get('valid_till'),
                default_maintainer=repodata.get('default_maintainer'),
                update_period=_parse_duration(
                    repodata.get('update_period', 600)),
                minpackages=repodata.get('minpackages', 0),
                shadow=repodata.get('shadow', False),
                incomplete=repodata.get('incomplete', False),
                repolinks=repodata.get('repolinks', []),
                packagelinks=[
                    PackageLink(type=LinkType.from_string(linkdata['type']),
                                url=linkdata['url'])
                    for linkdata in repodata.get('packagelinks', [])
                ],
                groups=repodata.get('groups', []) + list(extra_groups),
                sources=sources,
            )

            self._repositories.append(repo)
            self._repo_by_name[repo.name] = repo

        self._repositories = sorted(self._repositories,
                                    key=lambda repo: repo.sortname)
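
A hedged sketch of the configuration shape this constructor iterates over, expressed as a plain Python structure. The field names follow the code above; the concrete values and the `_listify` helper shown here are illustrative assumptions, not the real configuration or helper.

    from typing import Any, List

    def _listify(value: Any) -> List[Any]:
        # presumed behavior: wrap a scalar into a single-element list, pass lists through
        return value if isinstance(value, list) else [value]

    repodata = {
        'name': 'examplerepo',
        'desc': 'Example Repository',
        'family': 'example',
        'sources': [
            {
                # multiple names: the source data is cloned once per name
                'name': ['main', 'extras'],
                'fetcher': {'class': 'FileFetcher'},
                'parser': {'class': 'ExampleParser'},
            },
        ],
    }

    print(_listify(repodata['sources'][0]['name']))  # ['main', 'extras']
    print(_listify(repodata['family']))              # ['example']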