def test_classifier(self):
    """Setting classifiers stores them verbatim and the formatted
    metadata advertises Metadata-Version 1.1."""
    classifiers = ['Programming Language :: Python :: 3']
    dist = Distribution({'name': 'Boa',
                         'version': '3.0',
                         'classifiers': classifiers})
    self.assertEqual(dist.get_classifiers(), classifiers)
    self.assertIn('Metadata-Version: 1.1', self.format_metadata(dist))
def setup_mock(**attrs):
    """Mock the setup(**attrs) call in order to retrieve metadata.

    Runs the distutils v1 machinery on *attrs* and folds the parsed
    metadata into the module-level ``data`` mapping (assumes ``data`` is
    a dict-like accumulator defined elsewhere in this module — its
    'classifier' entry is a set, 'scripts'/'packages'/'modules'/
    'resources' are lists).
    """
    # TODO use config and metadata instead of Distribution
    from distutils.dist import Distribution
    dist = Distribution(attrs)
    dist.parse_config_files()

    # 1. retrieve metadata fields that are quite similar in
    # PEP 314 and PEP 345: (old distutils name, new metadata name)
    labels = (('name',) * 2,
              ('version',) * 2,
              ('author',) * 2,
              ('author_email',) * 2,
              ('maintainer',) * 2,
              ('maintainer_email',) * 2,
              ('description', 'summary'),
              ('long_description', 'description'),
              ('url', 'home_page'),
              ('platforms', 'platform'),
              ('provides', 'provides-dist'),
              ('obsoletes', 'obsoletes-dist'),
              ('requires', 'requires-dist'))
    get = lambda lab: getattr(dist.metadata, lab.replace('-', '_'))
    data.update((new, get(old)) for old, new in labels if get(old))

    # 2. retrieve data that requires special processing
    data['classifier'].update(dist.get_classifiers() or [])
    data['scripts'].extend(dist.scripts or [])
    data['packages'].extend(dist.packages or [])
    data['modules'].extend(dist.py_modules or [])

    # 2.1 data_files -> resources
    if dist.data_files:
        # normalize a flat list of files into a single ('', files) pair
        if (len(dist.data_files) < 2 or
                isinstance(dist.data_files[1], str)):
            dist.data_files = [('', dist.data_files)]
        # add tokens in the destination paths
        vars = {'distribution.name': data['name']}
        path_tokens = sysconfig.get_paths(vars=vars).items()
        # sort tokens to use the longest one first
        # BUG FIX: plain key=len sorted shortest-first, contradicting the
        # comment; reverse=True makes the most specific path win
        path_tokens = sorted(path_tokens, key=lambda x: len(x[1]),
                             reverse=True)
        for dest, srcs in (dist.data_files or []):
            dest = os.path.join(sys.prefix, dest)
            dest = dest.replace(os.path.sep, '/')
            for tok, path in path_tokens:
                path = path.replace(os.path.sep, '/')
                if not dest.startswith(path):
                    continue
                dest = ('{%s}' % tok) + dest[len(path):]
                files = [('/ '.join(src.rsplit('/', 1)), dest)
                         for src in srcs]
                data['resources'].extend(files)

    # 2.2 package_data
    data['package_data'] = dist.package_data.copy()

    # Use README file if its content is the description
    if "description" in data:
        # BUG FIX: was self.data['description'] -- NameError in a plain
        # function; every other access in this function uses `data`
        ref = md5(re.sub(r'\s', '',
                         data['description']).lower().encode())
        ref = ref.digest()
        for readme in glob.glob('README*'):
            with open(readme, encoding='utf-8') as fp:
                contents = fp.read()
            contents = re.sub(r'\s', '', contents.lower()).encode()
            val = md5(contents).digest()
            if val == ref:
                del data['description']
                data['description-file'] = readme
                break
def setup_mock(**attrs):
    """Mock the setup(**attrs) call in order to retrieve metadata.

    Parses *attrs* with the distutils v1 Distribution and merges the
    result into the module-level ``data`` accumulator (its 'classifier'
    entry is a set; 'scripts', 'packages', 'modules' and 'resources'
    are lists).
    """
    # TODO use config and metadata instead of Distribution
    from distutils.dist import Distribution
    dist = Distribution(attrs)
    dist.parse_config_files()

    # 1. retrieve metadata fields that are quite similar in
    # PEP 314 and PEP 345: (old distutils name, new metadata name)
    labels = (('name',) * 2,
              ('version',) * 2,
              ('author',) * 2,
              ('author_email',) * 2,
              ('maintainer',) * 2,
              ('maintainer_email',) * 2,
              ('description', 'summary'),
              ('long_description', 'description'),
              ('url', 'home_page'),
              ('platforms', 'platform'),
              ('provides', 'provides-dist'),
              ('obsoletes', 'obsoletes-dist'),
              ('requires', 'requires-dist'))
    get = lambda lab: getattr(dist.metadata, lab.replace('-', '_'))
    data.update((new, get(old)) for old, new in labels if get(old))

    # 2. retrieve data that requires special processing
    data['classifier'].update(dist.get_classifiers() or [])
    data['scripts'].extend(dist.scripts or [])
    data['packages'].extend(dist.packages or [])
    data['modules'].extend(dist.py_modules or [])

    # 2.1 data_files -> resources
    if dist.data_files:
        # a flat list of files is wrapped into a single ('', files) pair
        if (len(dist.data_files) < 2 or
                isinstance(dist.data_files[1], str)):
            dist.data_files = [('', dist.data_files)]
        # add tokens in the destination paths
        vars = {'distribution.name': data['name']}
        path_tokens = sysconfig.get_paths(vars=vars).items()
        # sort tokens to use the longest one first
        # BUG FIX: ascending key=len put the shortest path first,
        # contradicting the comment; reverse=True restores the intent
        path_tokens = sorted(path_tokens, key=lambda x: len(x[1]),
                             reverse=True)
        for dest, srcs in (dist.data_files or []):
            dest = os.path.join(sys.prefix, dest)
            dest = dest.replace(os.path.sep, '/')
            for tok, path in path_tokens:
                path = path.replace(os.path.sep, '/')
                if not dest.startswith(path):
                    continue
                dest = ('{%s}' % tok) + dest[len(path):]
                files = [('/ '.join(src.rsplit('/', 1)), dest)
                         for src in srcs]
                data['resources'].extend(files)

    # 2.2 package_data
    data['package_data'] = dist.package_data.copy()

    # Use README file if its content is the description
    if "description" in data:
        # BUG FIX: was self.data['description'] -- NameError in a plain
        # function; the accumulator here is the module-level `data`
        ref = md5(re.sub(r'\s', '',
                         data['description']).lower().encode())
        ref = ref.digest()
        for readme in glob.glob('README*'):
            with open(readme, encoding='utf-8') as fp:
                contents = fp.read()
            contents = re.sub(r'\s', '', contents.lower()).encode()
            val = md5(contents).digest()
            if val == ref:
                del data['description']
                data['description-file'] = readme
                break
def setup(**attrs):
    """Mock the setup(**attrs) call in order to retrieve metadata.

    Python 2 variant: uses the distutils v1 processing to correctly
    parse metadata and folds the result into the module-level ``data``
    accumulator.
    """
    # XXX we could also use the setuptools Distribution?
    from distutils.dist import Distribution
    dist = Distribution(attrs)
    dist.parse_config_files()

    # 1. retrieve metadata that are quite similar PEP 314 <-> PEP 345:
    # pairs of (old distutils name, new metadata name)
    labels = (('name',) * 2,
              ('version',) * 2,
              ('author',) * 2,
              ('author_email',) * 2,
              ('maintainer',) * 2,
              ('maintainer_email',) * 2,
              ('description', 'summary'),
              ('long_description', 'description'),
              ('url', 'home_page'),
              ('platforms', 'platform'))
    # these distutils fields only exist from Python 2.5 onward
    if sys.version[:3] >= '2.5':
        labels += (('provides', 'provides-dist'),
                   ('obsoletes', 'obsoletes-dist'),
                   ('requires', 'requires-dist'))
    get = lambda lab: getattr(dist.metadata, lab.replace('-', '_'))
    data.update((new, get(old)) for (old, new) in labels if get(old))

    # 2. retrieve data that requires special processing
    data['classifier'].update(dist.get_classifiers() or [])
    data['scripts'].extend(dist.scripts or [])
    data['packages'].extend(dist.packages or [])
    data['modules'].extend(dist.py_modules or [])

    # 2.1 data_files -> resources
    if dist.data_files:
        # a flat list of files is wrapped into a single ('', files) pair
        if (len(dist.data_files) < 2 or
                isinstance(dist.data_files[1], str)):
            dist.data_files = [('', dist.data_files)]
        # add tokens in the destination paths
        vars = {'distribution.name': data['name']}
        path_tokens = sysconfig.get_paths(vars=vars).items()
        # sort tokens to use the longest one first
        # TODO chain two sorted with key arguments, remove cmp
        path_tokens.sort(cmp=lambda x, y: cmp(len(y), len(x)),
                         key=lambda x: x[1])
        for dest, srcs in (dist.data_files or []):
            dest = os.path.join(sys.prefix, dest)
            for tok, path in path_tokens:
                if dest.startswith(path):
                    dest = ('{%s}' % tok) + dest[len(path):]
                    files = [('/ '.join(src.rsplit('/', 1)), dest)
                             for src in srcs]
                    data['resources'].extend(files)
                    continue

    # 2.2 package_data -> extra_files
    package_dirs = dist.package_dir or {}
    for package, extras in dist.package_data.iteritems() or []:
        package_dir = package_dirs.get(package, package)
        files = [os.path.join(package_dir, f) for f in extras]
        data['extra_files'].extend(files)

    # Use README file if its content is the description
    if "description" in data:
        # BUG FIX: was self.data['description'] -- NameError in a plain
        # function; the accumulator here is the module-level `data`
        ref = md5(re.sub(r'\s', '', data['description']).lower())
        ref = ref.digest()
        for readme in glob.glob('README*'):
            fp = open(readme)
            try:
                contents = fp.read()
            finally:
                fp.close()
            val = md5(re.sub(r'\s', '', contents.lower())).digest()
            if val == ref:
                del data['description']
                data['description-file'] = readme
                break
def setup(**attrs):
    """Mock the setup(**attrs) call in order to retrieve metadata.

    Python 2 variant: runs the distutils v1 Distribution machinery on
    *attrs* and merges the parsed metadata into the module-level
    ``data`` accumulator.
    """
    # XXX we could also use the setuptools Distribution?
    from distutils.dist import Distribution
    dist = Distribution(attrs)
    dist.parse_config_files()

    # 1. retrieve metadata that are quite similar PEP 314 <-> PEP 345:
    # pairs of (old distutils name, new metadata name)
    labels = (('name',) * 2,
              ('version',) * 2,
              ('author',) * 2,
              ('author_email',) * 2,
              ('maintainer',) * 2,
              ('maintainer_email',) * 2,
              ('description', 'summary'),
              ('long_description', 'description'),
              ('url', 'home_page'),
              ('platforms', 'platform'))
    # these distutils fields only exist from Python 2.5 onward
    if sys.version[:3] >= '2.5':
        labels += (('provides', 'provides-dist'),
                   ('obsoletes', 'obsoletes-dist'),
                   ('requires', 'requires-dist'))
    get = lambda lab: getattr(dist.metadata, lab.replace('-', '_'))
    data.update((new, get(old)) for (old, new) in labels if get(old))

    # 2. retrieve data that requires special processing
    data['classifier'].update(dist.get_classifiers() or [])
    data['scripts'].extend(dist.scripts or [])
    data['packages'].extend(dist.packages or [])
    data['modules'].extend(dist.py_modules or [])

    # 2.1 data_files -> resources
    if dist.data_files:
        # a flat list of files is wrapped into a single ('', files) pair
        if (len(dist.data_files) < 2 or
                isinstance(dist.data_files[1], str)):
            dist.data_files = [('', dist.data_files)]
        # add tokens in the destination paths
        vars = {'distribution.name': data['name']}
        path_tokens = sysconfig.get_paths(vars=vars).items()
        # sort tokens to use the longest one first
        # TODO chain two sorted with key arguments, remove cmp
        path_tokens.sort(cmp=lambda x, y: cmp(len(y), len(x)),
                         key=lambda x: x[1])
        for dest, srcs in (dist.data_files or []):
            dest = os.path.join(sys.prefix, dest)
            for tok, path in path_tokens:
                if dest.startswith(path):
                    dest = ('{%s}' % tok) + dest[len(path):]
                    files = [('/ '.join(src.rsplit('/', 1)), dest)
                             for src in srcs]
                    data['resources'].extend(files)
                    continue

    # 2.2 package_data -> extra_files
    package_dirs = dist.package_dir or {}
    for package, extras in dist.package_data.iteritems() or []:
        package_dir = package_dirs.get(package, package)
        files = [os.path.join(package_dir, f) for f in extras]
        data['extra_files'].extend(files)

    # Use README file if its content is the description
    if "description" in data:
        # BUG FIX: was self.data['description'] -- NameError in a plain
        # function; the accumulator here is the module-level `data`
        ref = md5(re.sub(r'\s', '', data['description']).lower())
        ref = ref.digest()
        for readme in glob.glob('README*'):
            fp = open(readme)
            try:
                contents = fp.read()
            finally:
                fp.close()
            val = md5(re.sub(r'\s', '', contents.lower())).digest()
            if val == ref:
                del data['description']
                data['description-file'] = readme
                break