def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse ReactOS rapps ``.txt`` package descriptions from a directory.

    Yields one PackageMaker per ``.txt`` file; entries without a
    ``Version`` field are logged as errors and skipped.
    """
    for filename in os.listdir(os.path.join(path)):
        if not filename.endswith('.txt'):
            continue

        with factory.begin(filename) as pkg:
            config = configparser.ConfigParser(interpolation=None)

            with open(os.path.join(path, filename), 'r', encoding='utf_8_sig') as f:
                # readfp() was deprecated since Python 3.2 and removed in
                # 3.12; read_file() is the drop-in replacement
                config.read_file(f)

            section = config['Section']

            pkg.add_name(filename[:-4], NameType.REACTOS_FILENAME)

            if section.get('Version') is None:
                pkg.log('no version defined', Logger.ERROR)
                continue

            pkg.set_version(section['Version'])
            pkg.set_summary(section['Description'])
            pkg.add_homepages(section.get('URLSite'))
            pkg.add_downloads(section['URLDownload'])
            pkg.add_licenses(section.get('License'))
            pkg.add_name(section['Name'], NameType.REACTOS_NAME)

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse Termux packages from a JSON dump."""
    normalize_version = VersionStripper().strip_left_greedy(':')

    with open(path, 'r', encoding='utf-8') as jsonfile:
        packages = json.load(jsonfile)

    for item in packages:
        pkg = factory.begin()

        pkg.set_name(item['name'])
        pkg.set_version(item['version'], normalize_version)
        pkg.set_summary(item['description'])
        pkg.add_homepages(item['homepage'])
        pkg.add_downloads(item.get('srcurl'))

        maintainer = item['maintainer']
        pkg.add_maintainers(extract_maintainers(maintainer))

        # maintainer may also be in '@username' form
        username_match = re.search('(?:^| )@([^ ]+)$', maintainer)
        if username_match:
            pkg.add_maintainers(username_match.group(1).lower() + '@termux')

        yield pkg
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse OpenIndiana package metadata."""
    for fmri, pkgdata in _iter_packages(path):
        with factory.begin(f'{fmri} {pkgdata["version"]}') as pkg:
            variables = _parse_actions(pkgdata['actions'])

            # entries without a name are likely not real packages;
            # skip them early to avoid parsing other stuff and
            # polluting the logs with warnings
            has_name = 'com.oracle.info.name' in variables
            has_version = 'com.oracle.info.version' in variables
            if not (has_name and has_version):
                continue

            # Regarding comment requirement: there are some packages which lack it,
            # however ALL of them have counterparts with comment and some
            # additional fields (category, homepage, downloads). Packages without
            # comment look like legacy, and it's OK and desirable to drop them here
            if 'pkg.summary' not in variables:
                continue

            pkg.add_name(variables['com.oracle.info.name'][0], NameType.OPENINDIANA_NAME)
            pkg.add_name(fmri, NameType.OPENINDIANA_FMRI)
            pkg.set_version(variables['com.oracle.info.version'][0])
            pkg.set_summary(variables['pkg.summary'][0])

            classifications = variables.get('info.classification', [])
            pkg.add_categories(cat.rsplit(':', 1)[-1] for cat in classifications)

            pkg.add_homepages(variables.get('info.upstream-url'))
            pkg.add_downloads(variables.get('info.source-url'))

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse Conan recipes by walking conandata.yml files.

    Yields one PackageMaker clone per version listed in each recipe.
    """
    for conandata_abs_path in walk_tree(path, name='conandata.yml'):
        conandata_rel_path = os.path.relpath(conandata_abs_path, path)

        with factory.begin(conandata_rel_path) as pkg:
            pkg.add_name(conandata_rel_path.split('/')[1], NameType.CONAN_RECIPE_NAME)

            # be explicit about the encoding instead of relying on the
            # locale-dependent default, which may fail on non-UTF-8 systems
            with open(conandata_abs_path, encoding='utf-8') as fd:
                conandata = yaml.safe_load(fd)

            patches = _extract_patches(conandata)

            for version_info in _extract_version_infos(conandata):
                verpkg = pkg.clone(append_ident=':' + version_info.version)

                verpkg.set_version(version_info.version)

                # XXX: we may create more subpackages here based on url_info.tags
                # which may contain various OSes, architectures, compilers and probably
                # other specifics (see cspice/all/conandata.yml for example)
                for url_info in version_info.url_infos:
                    verpkg.add_downloads(url_info.url)

                if version_info.version in patches:
                    verpkg.set_extra_field('patch', patches[version_info.version])

                verpkg.set_extra_field('folder', conandata_rel_path.split('/')[2])

                yield verpkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Generator[PackageMaker, None, None]:
    """Parse vcpkg ports from their CONTROL files."""
    portsdir = os.path.join(path, 'ports')

    for pkgdir in os.listdir(portsdir):
        controlpath = os.path.join(portsdir, pkgdir, 'CONTROL')
        if not os.path.exists(controlpath):
            continue

        pkg = factory.begin()

        pkg.set_origin(pkgdir)
        pkg.set_name(pkgdir)

        with open(controlpath, 'r', encoding='utf-8', errors='ignore') as controlfile:
            for rawline in controlfile:
                line = rawline.strip()

                if line.startswith('Version:') and not pkg.version:
                    version = line[8:].strip()
                    # date-like versions are not real upstream versions
                    if re.match('[0-9]{4}[.-][0-9]{1,2}[.-][0-9]{1,2}', version):
                        pkg.set_version(version)
                        pkg.set_flags(PackageFlags.ignore)
                    else:
                        pkg.set_version(version, normalize_version)
                elif line.startswith('Description:') and not pkg.comment:
                    pkg.set_summary(line[12:])

        # pretty much a hack to shut a bunch of fake versions up
        portfilepath = os.path.join(portsdir, pkgdir, 'portfile.cmake')
        if os.path.exists(portfilepath):
            with open(portfilepath, 'r', encoding='utf-8', errors='ignore') as portfile:
                for line in portfile:
                    if 'libimobiledevice-win32' in line:
                        pkg.log('marking as untrusted, https://github.com/libimobiledevice-win32 accused of version faking', severity=Logger.WARNING)
                        pkg.set_flags(PackageFlags.untrusted)
                        break

        yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse a saved PyPI HTML listing page."""
    pattern = '<td><a href="/pypi/([^"]+)/([^"]+)">[^<>]*</a></td>[ \n]*<td>([^<>]*)</td>'

    with open(path, 'r', encoding='utf-8') as htmlfile:
        page = htmlfile.read()

    for name, version, rawcomment in re.findall(pattern, page, flags=re.MULTILINE):
        pkg = factory.begin()

        pkg.set_name(name)
        pkg.set_version(version)

        comment = rawcomment.strip()
        if '\n' in comment:
            pkg.log('{}: summary is multiline'.format(pkg.name), severity=Logger.WARNING)
        else:
            pkg.set_summary(comment)

        pkg.add_homepages('https://pypi.python.org/pypi/{}/{}'.format(name, version))

        yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse ReactOS rapps ``.txt`` package descriptions from a directory."""
    for filename in os.listdir(os.path.join(path)):
        if not filename.endswith('.txt'):
            continue

        with factory.begin(filename) as pkg:
            config = configparser.ConfigParser(interpolation=None)

            with open(os.path.join(path, filename), 'r', encoding='utf_8_sig') as f:
                # readfp() was deprecated since Python 3.2 and removed in
                # 3.12; read_file() is the drop-in replacement
                config.read_file(f)

            section = config['Section']

            pkg.set_name(filename[:-4])
            pkg.set_version(section.get('Version'))
            pkg.set_summary(section['Description'])
            pkg.add_homepages(section.get('URLSite'))
            pkg.add_downloads(section['URLDownload'])
            pkg.add_licenses(section.get('License'))
            pkg.set_extra_field('longname', section['Name'])

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse ROSA-style repository metadata XML."""
    root = xml.etree.ElementTree.parse(path)

    for info in root.findall('./info'):
        with factory.begin() as pkg:
            name, epoch, version, release, arch = nevra_parse(info.attrib['fn'])

            pkg.add_name(name, NameType.GENERIC_PKGNAME)
            pkg.set_version(version)
            pkg.set_rawversion(nevra_construct(None, epoch, version, release))
            pkg.set_arch(arch)

            # What we do here is we try to extract prerelease part
            # and mark version as ignored with non-trivial ROSAREV,
            # as it is likely a snapshot and thus cannot be trusted.
            # ROSAREV is the *release* component, so inspect `release`
            # rather than `version` (the sibling parser does the same)
            if not release.isdecimal():
                pkg.set_flags(PackageFlags.IGNORE)
                match = re.search('\\b(a|alpha|b|beta|pre|rc)[0-9]+', release.lower())
                if match:
                    pkg.set_version(version + match.group(0))

            pkg.add_homepages(info.attrib['url'])
            pkg.add_licenses(info.attrib['license'])

            yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse cygport files into packages."""
    normalize_version = VersionStripper().strip_right('+')

    for cygport_path, cygport_name in _iter_cygports(path):
        pkg = factory.begin(cygport_name)

        # XXX: save *bl* to rawversion
        match = re.match('(.*)-[0-9]+bl[0-9]+\\.cygport$', cygport_name)

        if not match:
            pkg.log('unable to parse cygport name', severity=Logger.ERROR)
            continue

        pkg.set_name_and_version(match.group(1), normalize_version)

        # these fields do not contain variables (for now), so are safe to extract;
        # read with an explicit encoding instead of the locale-dependent default
        with open(cygport_path, 'r', encoding='utf-8') as cygdata:
            for line in cygdata:
                match = re.match('CATEGORY="([^"$]+)"', line)
                if match:
                    pkg.add_categories(match.group(1))

                match = re.match('SUMMARY="([^"$]+)"', line)
                if match:
                    pkg.set_summary(match.group(1))

        yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse a pkgsrc-style INDEX file (pipe-separated, 12 fields per line)."""
    normalize_version = VersionStripper().strip_right('nb')

    with open(path, encoding='utf-8') as indexfile:
        for rawline in indexfile:
            pkg = factory.begin()

            fields = rawline.strip().split('|')

            if len(fields) != 12:
                pkg.log('skipping, unexpected number of fields {}'.format(len(fields)), severity=Logger.ERROR)
                continue

            if not fields[0]:
                pkg.log('skipping, empty first field', severity=Logger.ERROR)
                continue

            name, version = fields[0].rsplit('-', 1)

            pkg.add_name(name, NameType.BSD_PKGNAME)
            pkg.add_name(fields[1], NameType.BSD_ORIGIN)
            pkg.set_version(version, normalize_version)
            pkg.set_summary(fields[3])

            # sometimes the OWNER variable is used instead of MAINTAINER,
            # in which case the owner does not make it into the INDEX
            pkg.add_maintainers(extract_maintainers(fields[5]))
            pkg.add_categories(fields[6].split())
            pkg.add_homepages(fields[11])

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse Buckaroo recipe JSON files found under ``path``.

    Yields one PackageMaker clone per version listed in each recipe.
    """
    for filename in walk_tree(path, suffix='.json'):
        # use a context manager so the file handle is closed promptly
        # instead of leaking it until garbage collection
        with open(filename, encoding='utf-8', errors='ignore') as jsonfile:
            data = json.load(jsonfile)

        if 'versions' not in data:
            continue

        with factory.begin(filename) as pkg:
            pkg.add_name(data['name'], NameType.BUCKAROO_NAME)
            pkg.add_name(os.path.basename(filename)[:-5], NameType.BUCKAROO_FILENAME)
            pkg.add_licenses(data['license'])
            pkg.add_homepages(data['url'])
            pkg.set_extra_field('recipe', os.path.relpath(filename, path))

            for version, versiondata in data['versions'].items():
                verpkg = pkg.clone()
                verpkg.set_version(version)
                # not parsing sources as these contain references to specific commit snapshots
                yield verpkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse freshcode releases, keeping only the latest version per package."""
    latest: Dict[str, PackageMaker] = {}

    # note that we actually parse database prepared by
    # fetcher, not the file we've downloaded
    with open(path, 'rb') as jsonfile:
        for entry in JsonSlicer(jsonfile, ('releases', None)):
            pkg = factory.begin()

            pkg.set_name(entry['name'])
            pkg.set_version(entry['version'])

            if not pkg.check_sanity(verbose=False):
                continue

            pkg.add_homepages(entry.get('homepage'))

            pkg.set_summary(entry.get('summary'))
            if not pkg.comment:
                pkg.set_summary(entry.get('description'))  # multiline

            #pkg.add_maintainers(entry.get('submitter') + '@freshcode') # unfiltered garbage
            #pkg.add_downloads(entry.get('download')) # ignore for now, may contain download page urls instead of file urls
            pkg.add_licenses(entry.get('license'))

            # take latest known versions
            known = latest.get(pkg.name)
            if known is None or version_compare(pkg.version, known.version) > 0:
                latest[pkg.name] = pkg

    yield from latest.values()
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse packages from an rpm header list file."""
    wanted_keys = ('name', 'version', 'release', 'packager', 'group', 'summary')

    for header in rpm.readHeaderListFromFile(path):
        with factory.begin() as pkg:
            # decode the raw header values with the repository's encoding
            fields = {}
            for key in wanted_keys:
                value = header[key]
                fields[key] = str(value, self.encoding) if value is not None else None

            pkg.set_name(fields['name'])
            pkg.set_version(fields['version'])  # XXX: handle release

            if fields['version'] is None:
                raise RuntimeError('version not defined')

            pkg.set_rawversion(nevra_construct(None, header['epoch'], fields['version'], fields['release']))

            if fields['packager']:
                # XXX: may have multiple maintainers
                pkg.add_maintainers(extract_maintainers(fields['packager']))

            pkg.add_categories(fields['group'])
            pkg.set_summary(fields['summary'])

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse NuGet Atom feed pages saved as XML files."""
    # hoist the XML namespace prefixes used by the feed
    atom = '{http://www.w3.org/2005/Atom}'
    meta = '{http://schemas.microsoft.com/ado/2007/08/dataservices/metadata}'
    data = '{http://schemas.microsoft.com/ado/2007/08/dataservices}'

    for pagepath in os.listdir(path):
        if not pagepath.endswith('.xml'):
            continue

        root = xml.etree.ElementTree.parse(os.path.join(path, pagepath))

        for entry in root.findall(atom + 'entry'):
            pkg = factory.begin()

            pkg.set_name(entry.find(atom + 'title').text)  # type: ignore
            pkg.set_version(entry.find(meta + 'properties/' + data + 'Version').text)  # type: ignore
            pkg.add_homepages(entry.find(meta + 'properties/' + data + 'ProjectUrl').text)  # type: ignore

            commentnode = entry.find(atom + 'summary')
            if commentnode is not None:
                pkg.set_summary(commentnode.text)

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse Debian-style package index entries."""
    for pkgdata in _iter_packages(path):
        with factory.begin(pkgdata['Package']) as pkg:
            fixed_version, flags = parse_debian_version(pkgdata['Version'])

            pkg.set_version(fixed_version)
            pkg.set_rawversion(pkgdata['Version'])
            pkg.set_flags(flags)

            # maintainers may be listed in either of these two fields
            for field in ('Maintainer', 'Uploaders'):
                pkg.add_maintainers(extract_maintainers(pkgdata.get(field, '')))

            pkg.add_categories(pkgdata.get('Section'))
            pkg.add_homepages(pkgdata.get('Homepage'))

            self._extra_handling(pkg, pkgdata)

            url = _extract_vcs_link(pkgdata)
            if url is not None:
                if self._allowed_vcs_urls_re is not None and self._allowed_vcs_urls_re.match(url):
                    pkg.add_links(LinkType.PACKAGE_SOURCES, url)

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse YACP cygport files into packages."""
    normalize_version = VersionStripper().strip_right('+')

    for cygport_path, cygport_name in _iter_cygports(path):
        pkg = factory.begin(cygport_name)

        # XXX: save *bl* to rawversion
        match = re.match('(.*)-[0-9]+bl[0-9]+\\.cygport$', cygport_name)

        if not match:
            raise RuntimeError('unable to parse cygport name')

        name, version = match.group(1).rsplit('-', 1)

        pkg.add_name(name, NameType.YACP_NAME)
        pkg.set_version(version, normalize_version)

        # these fields do not contain variables (for now), so are safe to extract;
        # read with an explicit encoding instead of the locale-dependent default
        with open(cygport_path, 'r', encoding='utf-8') as cygdata:
            for line in cygdata:
                match = re.match('CATEGORY="([^"$]+)"', line)
                if match:
                    pkg.add_categories(match.group(1))

                match = re.match('SUMMARY="([^"$]+)"', line)
                if match:
                    pkg.set_summary(match.group(1))

        yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse a FreeBSD-style INDEX file (pipe-separated, 13 fields per line)."""
    normalize_version = VersionStripper().strip_right(',').strip_right('_')

    with open(path, encoding='utf-8') as indexfile:
        for rawline in indexfile:
            pkg = factory.begin()

            fields = rawline.strip().split('|')

            if len(fields) != 13:
                pkg.log('skipping, unexpected number of fields {}'.format(len(fields)), severity=Logger.ERROR)
                continue

            pkg.set_name_and_version(fields[0], normalize_version)
            pkg.set_summary(fields[3])
            pkg.add_maintainers(extract_maintainers(fields[5]))
            pkg.add_categories(fields[6].split())
            pkg.add_homepages(fields[9])

            # derive port name and category/port origin from the port path
            parts = fields[1].split('/')
            pkg.set_extra_field('portname', parts[-1])
            pkg.set_origin('/'.join(parts[-2:]))

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse Baulk bucket manifests."""
    bucket_path = os.path.join(path, 'bucket')

    for filename in os.listdir(bucket_path):
        if not filename.endswith('.json'):
            continue

        with factory.begin(filename) as pkg:
            # JSON is defined to be UTF-8; don't rely on the locale default
            with open(os.path.join(bucket_path, filename), encoding='utf-8') as fd:
                pkgdata = json.load(fd)

            pkg.add_name(filename.removesuffix('.json'), NameType.BAULK_NAME)
            pkg.set_version(pkgdata['version'])
            pkg.set_summary(pkgdata['description'])

            homepage = pkgdata.get('homepage')
            if homepage and 'baulk' in homepage:
                pkg.log('Not trusting package with baulk upstream', severity=Logger.ERROR)
                pkg.set_flags(PackageFlags.UNTRUSTED)

            pkg.add_homepages(homepage)
            pkg.add_downloads(pkgdata.get('url'), pkgdata.get('url64'))

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse AUR RPC result dumps."""
    normalize_version = VersionStripper().strip_right_greedy('-').strip_left(':').strip_right_greedy('+')

    for filename in os.listdir(path):
        if not filename.endswith('.json'):
            continue

        # JSON is defined to be UTF-8; don't rely on the locale default
        with open(os.path.join(path, filename), 'r', encoding='utf-8') as jsonfile:
            results = json.load(jsonfile)['results']

        for result in results:
            pkg = factory.begin()

            pkg.set_name(result['Name'])
            pkg.set_version(result['Version'], normalize_version)
            pkg.set_summary(result['Description'])
            pkg.add_homepages(result['URL'])
            pkg.add_licenses(result.get('License'))

            # dict.get() covers both "key absent" and "value empty/null"
            if result.get('Maintainer'):
                pkg.add_maintainers(extract_maintainers(result['Maintainer'] + '@aur'))

            if result.get('PackageBase'):
                pkg.set_basename(result['PackageBase'])

            # XXX: enable when we support multiple categories
            #if 'Keywords' in result and result['Keywords']:
            #    pkg.add_categories(result['Keywords'])

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse pspec.xml package descriptions found under ``path``."""
    for filename in walk_tree(path, suffix='pspec.xml'):
        relpath = os.path.relpath(filename, path)

        pkg = factory.begin(relpath)

        try:
            root = xml.etree.ElementTree.parse(filename)
        except xml.etree.ElementTree.ParseError as e:
            pkg.log('Cannot parse XML: ' + str(e), Logger.ERROR)
            continue

        pkg.set_name(root.find('./Source/Name').text)  # type: ignore
        pkg.set_summary(root.find('./Source/Summary').text)  # type: ignore

        pkg.add_homepages(el.text for el in root.findall('./Source/Homepage'))
        pkg.add_downloads(el.text for el in root.findall('./Source/Archive'))
        pkg.add_licenses(el.text for el in root.findall('./Source/License'))
        pkg.add_categories(el.text for el in root.findall('./Source/IsA'))
        pkg.add_maintainers(el.text for el in root.findall('./Source/Packager/Email'))

        pkg.set_extra_field('pspecdir', os.path.dirname(relpath))

        # the version comes from the newest History/Update entry
        lastupdate = max(root.findall('./History/Update'), key=lambda el: int(el.attrib['release']))
        pkg.set_version(lastupdate.find('./Version').text)  # type: ignore

        yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse an APKINDEX file.

    APKINDEX consists of 'X:value' records separated by blank lines.
    """
    with open(os.path.join(path, 'APKINDEX'), 'r', encoding='utf-8') as apkindex:
        state = {}

        def flush_state() -> Iterable[PackageMaker]:
            # only emit entries whose name (P) equals their origin (o);
            # other subpackages are skipped
            if state and state['P'] == state['o']:
                pkg = factory.begin()

                pkg.set_name(state['P'])
                pkg.set_version(state['V'], normalize_version)
                pkg.set_summary(state['T'])
                pkg.add_homepages(state['U'])  # XXX: split?
                pkg.add_licenses(state['L'])
                pkg.add_maintainers(extract_maintainers(state.get('m')))

                yield pkg

        for line in apkindex:
            line = line.strip()
            if line:
                state[line[0]] = line[2:].strip()
                continue

            # empty line, we can flush our state
            yield from flush_state()
            state = {}

        # robustness: also flush after EOF — if the file does not end
        # with a trailing blank line the final record would otherwise
        # be silently dropped
        yield from flush_state()
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse recipe JSON files under ``path``, yielding one package per version."""
    for filename in walk_tree(path, suffix='.json'):
        # use a context manager so the file handle is closed promptly
        # instead of leaking it until garbage collection
        with open(filename, encoding='utf-8', errors='ignore') as jsonfile:
            data = json.load(jsonfile)

        if 'versions' not in data:
            continue

        for version, versiondata in data['versions'].items():
            pkg = factory.begin()

            pkg.set_name(data['name'])
            pkg.set_version(version)
            pkg.add_licenses(data['license'])
            pkg.add_homepages(data['url'])
            pkg.set_extra_field('recipe', os.path.relpath(filename, path))

            # garbage: links to git:// or specific commits
            #if isinstance(versiondata['source'], str):
            #    pkg.downloads = [versiondata['source']]
            #else:
            #    pkg.downloads = [versiondata['source']['url']]

            yield pkg
class TestPackage:
    """Context-manager helper for building a Package in tests.

    Entering returns a PackageMaker pre-filled with a dummy name and
    version; exiting spawns the final Package, after which attribute
    access on this object is proxied to the spawned Package.
    """

    _factory: PackageFactory
    _package_maker: Optional[PackageMaker]
    _package: Optional[Package]

    def __init__(self, logger: Logger = NoopLogger()) -> None:
        self._factory = PackageFactory(logger)
        self._package_maker = None
        self._package = None

    def __enter__(self) -> PackageMaker:
        maker = self._factory.begin()
        maker.add_name('dummy_package', NameType.GENERIC_GEN_NAME)
        maker.set_version('0dummy0')
        self._package_maker = maker
        return maker

    def __exit__(self, *rest: Any) -> None:
        assert self._package_maker
        self._package = self._package_maker.spawn('dummy_repo', 'dummy_family')

    def __getattr__(self, key: str) -> Any:
        # proxy reads to the spawned package (only valid after __exit__)
        assert self._package
        return getattr(self._package, key)
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse SageMath package directories (identified by package-version.txt)."""
    normalize_version = VersionStripper().strip_right('.p')

    for versionfile in walk_tree(path, name='package-version.txt'):
        pkgpath = os.path.dirname(versionfile)

        with factory.begin(pkgpath) as pkg:
            pkg.add_name(os.path.basename(pkgpath), NameType.SAGEMATH_NAME)

            projectname = os.path.basename(pkgpath)
            # presence of install-requires.txt marks a python project
            if os.path.exists(os.path.join(pkgpath, 'install-requires.txt')):
                projectname = 'python:' + projectname
            pkg.add_name(projectname, NameType.SAGEMATH_PROJECT_NAME)

            # read with an explicit encoding instead of the locale-dependent default
            with open(versionfile, encoding='utf-8') as fd:
                pkg.set_version(fd.read().strip(), normalize_version)

            if upstream_url := _parse_upstream_url(pkgpath):
                pkg.add_downloads(upstream_url.replace('VERSION', pkg.rawversion))

            add_patch_files(pkg, os.path.join(pkgpath, 'patches'), '*.patch')

            yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse an F-Droid index XML."""
    root = xml.etree.ElementTree.parse(path)

    for application in root.findall('application'):
        app = factory.begin()

        app.set_name(application.find('name').text)  # type: ignore
        app.add_licenses(application.find('license').text)  # type: ignore
        app.add_categories(application.find('category').text)  # type: ignore
        app.add_homepages(application.find('web').text)  # type: ignore
        app.set_extra_field('id', application.find('id').text)  # type: ignore

        # the version code upstream considers current
        upstream_version_code = int(application.find('marketvercode').text)  # type: ignore

        for package in application.findall('package'):
            version_code = int(package.find('versioncode').text)  # type: ignore
            version = package.find('version').text  # type: ignore

            if not version:
                continue

            pkg = app.clone()
            pkg.set_version(version)
            # anything newer than the market version code is a development build
            pkg.set_flags(PackageFlags.devel if version_code > upstream_version_code else 0)
            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Generator[PackageMaker, None, None]:
    """Parse a ports INDEX file with a variable number of pipe-separated fields."""
    with open(path, encoding='utf-8') as indexfile:
        for rawline in indexfile:
            pkg = factory.begin()

            fields = rawline.strip().split('|')

            if len(fields) < 7:  # varies
                pkg.log('skipping, unexpected number of fields {}'.format(len(fields)), severity=Logger.ERROR)
                continue

            # cut away string suffixes which come after version
            pkgname = fields[0]
            suffix_match = re.match('(.*?)(-[a-z_]+[0-9]*)+$', pkgname)
            if suffix_match:
                pkgname = suffix_match.group(1)

            pkg.set_name_and_version(pkgname, _normalize_version)
            pkg.set_summary(fields[3])
            pkg.add_maintainers(extract_maintainers(fields[5]))
            pkg.add_categories(fields[6].split())

            origin = fields[1].rsplit(',', 1)[0]
            pkg.set_origin(origin)
            pkg.set_extra_field('portname', origin.split('/')[1])

            yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse Slackware package lists."""
    for pkgname, pkglocation in _iterate_packages(path):
        pkg = factory.begin(pkgname)

        # package names look like <name>-<version>-<arch>-<build>
        name, version, arch, rest = pkgname.rsplit('-', 3)

        pkg.set_name(name)
        pkg.set_version(version)
        pkg.set_extra_field('location', pkglocation)

        # Don't waste cycles: slackware repositories have no structure,
        # so we can't construct links to sources anyway
        #locationcomps = pkglocation.split('/')
        #if len(locationcomps) == 3:
        #    pkg.set_extra_field('loc_subrepo', locationcomps[-2])
        #    pkg.set_extra_field('loc_category', locationcomps[-1])
        #else:
        #    pkg.log('unexpected location format: {}'.format(pkglocation), Logger.WARNING)

        # single letter garbage in slackware/, and has different
        # meaning in extras/ and patches/
        #pkg.add_categories(pkglocation.split('/')[-1])

        yield pkg
def iter_parse(
        self, path: str, factory: PackageFactory,
        transformer: PackageTransformer
) -> Generator[PackageMaker, None, None]:
    """Parse ROSA-style repository metadata XML."""
    root = xml.etree.ElementTree.parse(path)

    for info in root.findall('./info'):
        with factory.begin() as pkg:
            nevra = nevra_parse(info.attrib['fn'])

            name, epoch, version, release = nevra[0], nevra[1], nevra[2], nevra[3]

            pkg.set_name(name)
            pkg.set_version(version)
            pkg.set_rawversion(nevra_construct(None, epoch, version, release))

            # What we do here is we try to extract prerelease part
            # and mark version as ignored with non-trivial ROSAREV,
            # as it is likely a snapshot and thus cannot be trusted
            if not release.isdecimal():
                pkg.set_flags(PackageFlags.ignore)
                match = re.search('\\b(a|alpha|b|beta|pre|rc)[0-9]+', release.lower())
                if match:
                    # use the public setter instead of mutating
                    # pkg._package.version directly (the old code flagged
                    # itself as an encapsulation violation)
                    pkg.set_version(version + match.group(0))

            pkg.add_homepages(info.attrib['url'])
            pkg.add_licenses(info.attrib['license'])

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
    """Parse SlackBuilds.org ``.info`` package descriptions."""
    for category, pkgname in _iter_packages(path):
        fullname = category + '/' + pkgname

        with factory.begin(fullname) as pkg:
            info_path = os.path.join(path, category, pkgname, pkgname + '.info')

            if not os.path.isfile(info_path):
                pkg.log('.info file does not exist', severity=Logger.ERROR)
                continue

            pkg.add_categories(category)

            variables = _parse_infofile(info_path)

            # the .info file is expected to agree with its directory name
            assert variables['PRGNAM'] == pkgname

            pkg.add_name(variables['PRGNAM'], NameType.SLACKBUILDS_NAME)
            pkg.add_name(fullname, NameType.SLACKBUILDS_FULL_NAME)
            pkg.set_version(variables['VERSION'])
            pkg.add_homepages(variables['HOMEPAGE'])
            pkg.add_maintainers(extract_maintainers(variables['EMAIL']))

            for key in ('DOWNLOAD', 'DOWNLOAD_x86_64'):
                if variables[key] not in ('', 'UNSUPPORTED', 'UNTESTED'):
                    pkg.add_downloads(variables[key].split())

            yield pkg
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
    """Parse saved crates.io API result pages."""
    for pagefilename in os.listdir(path):
        if not pagefilename.endswith('.json'):
            continue

        pagepath = os.path.join(path, pagefilename)

        with open(pagepath, 'r', encoding='utf-8', errors='ignore') as pagedata:
            crates = json.load(pagedata)['crates']

        for crate in crates:
            pkg = factory.begin()

            # the API is expected to return identical id and name
            if crate['id'] != crate['name']:
                raise RuntimeError('id != name')

            pkg.add_name(crate['id'], NameType.CRATESIO_ID)
            pkg.set_version(crate['max_version'])
            pkg.set_summary(crate['description'])
            pkg.add_links(LinkType.UPSTREAM_HOMEPAGE, crate['homepage'])
            pkg.add_links(LinkType.UPSTREAM_REPOSITORY, crate['repository'])
            pkg.add_links(LinkType.UPSTREAM_DOCUMENTATION, crate['documentation'])

            yield pkg