def _augment_data_from_tarball(args, filename, data):
    """Augment *data* in place with information gleaned from the tarball.

    Merges the metadata extracted from *filename* into *data*, collects
    documentation-looking files (AUTHOR/ChangeLog/CHANGES/COPYING/LICENSE/
    NEWS/README) under ``data["doc_files"]``, and sets
    ``data["testsuite"]`` if any archive member path mentions "test".

    :param args: parsed CLI arguments; only ``args.name`` and
        ``args.version`` are read (to match the top-level archive dir).
    :param filename: path to the source tarball.
    :param data: template data dict, mutated in place.
    """
    # Raw strings: "\/" and "\s" are invalid escape sequences in non-raw
    # literals (SyntaxWarning on Python >= 3.12, an error in the future).
    # NOTE(review): args.name/args.version are interpolated unescaped, so
    # regex metacharacters in them (e.g. "." in "zope.interface") are
    # treated as wildcards — presumably harmless; confirm before escaping.
    docs_re = re.compile(
        r"{0}-{1}\/((?:AUTHOR|ChangeLog|CHANGES|COPYING|LICENSE|NEWS|README).*)"
        .format(args.name, args.version), re.IGNORECASE)
    shell_metachars_re = re.compile(r"[|&;()<>\s]")
    data_archive = meta_utils.from_archive(filename)
    data.update(data_archive['data'])
    names = py2pack.utils._get_archive_filelist(filename)
    _canonicalize_setup_data(data)
    for name in names:
        match = re.match(docs_re, name)
        if match:
            if "doc_files" not in data:
                data["doc_files"] = []
            # Quote the filename if it contains shell metacharacters, so it
            # can be pasted safely into a spec-file %doc line.
            if re.search(shell_metachars_re, match.group(1)):
                data["doc_files"].append("'" + match.group(1) + "'")
            else:
                data["doc_files"].append(match.group(1))
        # Very broad check for testsuites
        if "test" in name.lower():
            data["testsuite"] = True
def _augment_data_from_tarball(args, filename, data):
    """Augment *data* in place with information from the tarball.

    Merges the archive's extracted metadata into *data*, records
    documentation files under ``data["doc_files"]`` and license files
    under ``data["license_files"]`` (shell-quoted where needed), and
    flags ``data["testsuite"]`` if any member path mentions "test".
    """
    doc_pattern = re.compile(
        r"{0}-{1}\/((?:AUTHOR|ChangeLog|CHANGES|NEWS|README).*)".format(
            args.name, args.version), re.IGNORECASE)
    license_pattern = re.compile(
        r"{0}-{1}\/((?:COPYING|LICENSE).*)".format(args.name, args.version),
        re.IGNORECASE)
    archive_meta = meta_utils.from_archive(filename)
    data.update(archive_meta['data'])
    member_names = py2pack.utils._get_archive_filelist(filename)
    _canonicalize_setup_data(data)
    for member in member_names:
        doc_hit = doc_pattern.match(member)
        license_hit = license_pattern.match(member)
        if doc_hit:
            data.setdefault("doc_files", []).append(
                _quote_shell_metacharacters(doc_hit.group(1)))
        if license_hit:
            data.setdefault("license_files", []).append(
                _quote_shell_metacharacters(license_hit.group(1)))
        # Very broad check for testsuites
        if "test" in member.lower():
            data["testsuite"] = True
def metadata(args):
    """Extract the metadata from the given tarball and print it as JSON.

    Deprecated entry point: emits a DeprecationWarning pointing users at
    the 'metaextract' command, then prints the archive metadata for
    ``args.filename`` as sorted, indented JSON on stdout.
    """
    # Fixed grammar ("commands is" -> "command is") and the doubled space
    # that the original string concatenation produced ("removed  in").
    warnings.warn("the 'metadata' command is deprecated and will be removed "
                  "in 2017. Please use directly the command 'metaextract' "
                  "which is a requirement for py2pack",
                  DeprecationWarning)
    data = meta_utils.from_archive(args.filename)
    print(json.dumps(data, indent=4, sort_keys=True))
def _augment_data_from_tarball(args, filename, data):
    """Augment *data* in place with information gleaned from the tarball.

    Merges the metadata extracted from *filename* into *data*, collects
    documentation-looking files (AUTHOR/ChangeLog/CHANGES/COPYING/LICENSE/
    NEWS/README) under ``data["doc_files"]``, and sets
    ``data["testsuite"]`` if any archive member path mentions "test".

    :param args: parsed CLI arguments; only ``args.name`` and
        ``args.version`` are read (to match the top-level archive dir).
    :param filename: path to the source tarball.
    :param data: template data dict, mutated in place.
    """
    # Raw strings: "\/" and "\s" are invalid escape sequences in non-raw
    # literals (SyntaxWarning on Python >= 3.12, an error in the future).
    docs_re = re.compile(
        r"{0}-{1}\/((?:AUTHOR|ChangeLog|CHANGES|COPYING|LICENSE|NEWS|README).*)"
        .format(args.name, args.version), re.IGNORECASE)
    shell_metachars_re = re.compile(r"[|&;()<>\s]")
    data_archive = meta_utils.from_archive(filename)
    data.update(data_archive['data'])
    names = py2pack.utils._get_archive_filelist(filename)
    _canonicalize_setup_data(data)
    for name in names:
        match = re.match(docs_re, name)
        if match:
            if "doc_files" not in data:
                data["doc_files"] = []
            # Quote the filename if it contains shell metacharacters, so it
            # can be pasted safely into a spec-file %doc line.
            if re.search(shell_metachars_re, match.group(1)):
                data["doc_files"].append("'" + match.group(1) + "'")
            else:
                data["doc_files"].append(match.group(1))
        # Very broad check for testsuites
        if "test" in name.lower():
            data["testsuite"] = True
def _augment_data_from_tarball(args, filename, data):
    """Augment *data* in place with information from the tarball.

    Merges the archive's extracted metadata into *data*, appends shell-
    quoted documentation files to ``data["doc_files"]`` and license files
    to ``data["license_files"]``, and sets ``data["testsuite"]`` whenever
    a member path contains "test".
    """
    prefix = r"{0}-{1}\/".format(args.name, args.version)
    docs_re = re.compile(
        prefix + r"((?:AUTHOR|ChangeLog|CHANGES|NEWS|README).*)",
        re.IGNORECASE)
    license_re = re.compile(
        prefix + r"((?:COPYING|LICENSE).*)", re.IGNORECASE)
    data.update(meta_utils.from_archive(filename)['data'])
    filelist = py2pack.utils._get_archive_filelist(filename)
    _canonicalize_setup_data(data)
    for entry in filelist:
        for pattern, key in ((docs_re, "doc_files"),
                             (license_re, "license_files")):
            hit = pattern.match(entry)
            if hit:
                data.setdefault(key, []).append(
                    _quote_shell_metacharacters(hit.group(1)))
        # Very broad check for testsuites
        if "test" in entry.lower():
            data["testsuite"] = True
def test_from_archive(self, tararchive):
    """Metadata extracted from the fixture tarball lists its requirements."""
    archive_path, _archive_files = tararchive
    extracted = meta_utils.from_archive(archive_path)
    assert extracted["data"]["install_requires"] == ['bar', 'foo']