def test_new_vs_previous_guesses_match():
    """Test cases where new and deprecated functions match"""

    cens = "GPL (>= 3)"
    fam = guess_license_family(cens)
    assert fam == "GPL3"
    prev = deprecated_guess_license_family(cens)
    assert fam == prev, "new and deprecated guesses differ"

    cens = "GNU Lesser General Public License"
    fam = guess_license_family(cens)
    assert fam == "LGPL", "guess_license_family({}) is {}".format(cens, fam)
    prev = deprecated_guess_license_family(cens)
    assert fam == prev, "new and deprecated guesses differ"

    cens = "GNU General Public License some stuff then a 3 then stuff"
    fam = guess_license_family(cens)
    assert fam == "GPL3", "guess_license_family({}) is {}".format(cens, fam)
    prev = deprecated_guess_license_family(cens)
    assert fam == prev, "new and deprecated guesses differ"

    cens = "Affero GPL"
    fam = guess_license_family(cens)
    assert fam == "AGPL", "guess_license_family({}) is {}".format(cens, fam)
    prev = deprecated_guess_license_family(cens)
    assert fam == prev, "new and deprecated guesses differ"
Example #2
def test_lgpl():
    licenses = {'GNU Lesser General Public License (LGPL)', 'LGPL-2.1',
                'LGPL-2', 'LGPL-3', 'LGPL (>= 2)',
                'BSD License and GNU Library or Lesser General Public License (LGPL)'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == 'LGPL'
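A style note: a set-based loop inside one test stops at the first failing license. Since these suites already run under pytest, the same coverage can be written in parametrized form so each case passes or fails independently — a sketch, not the project's actual code:

import pytest

# Sketch: parametrized form of the loop above; each license is its own case.
@pytest.mark.parametrize("cens", [
    'GNU Lesser General Public License (LGPL)', 'LGPL-2.1', 'LGPL-2',
    'LGPL-3', 'LGPL (>= 2)',
    'BSD License and GNU Library or Lesser General Public License (LGPL)',
])
def test_lgpl_parametrized(cens):
    assert guess_license_family(cens) == 'LGPL'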
Example #3
def test_gpl3():
    licenses = {u'GPL 3', u'GPL-3', u'GPL-3 | file LICENSE',
                u'GPL-2 | GPL-3 | file LICENSE', u'GPL (>= 3) | file LICENCE',
                u'GPL (>= 2)', u'GPL-2 | GPL-3', u'GPL (>= 2) | file LICENSE'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'GPL3'
Example #4
def test_old_warnings_no_longer_fail():
    # the following previously threw warnings. Came from r/linux-64
    warnings = {
        u'MIT License', u'GNU Lesser General Public License (LGPL)',
        u'GPL-2 | GPL-3 | file LICENSE', u'GPL (>= 3) | file LICENCE',
        u'BSL-1.0', u'GPL (>= 2)', u'file LICENSE (FOSS)',
        u'Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)',
        u'MIT + file LICENSE', u'GPL-2 | GPL-3', u'GPL (>= 2) | file LICENSE',
        u'Unlimited', u'GPL-3 | file LICENSE',
        u'GNU General Public License v2 or later (GPLv2+)', u'LGPL-2.1',
        u'LGPL-2', u'LGPL-3', u'GPL',
        u'zlib (http://zlib.net/zlib_license.html)',
        u'Free software (X11 License)', u'Custom free software license',
        u'Old MIT', u'GPL 3', u'Apache License (== 2.0)', u'GPL (>= 3)', None,
        u'LGPL (>= 2)', u'BSD_2_clause + file LICENSE', u'GPL-3', u'GPL-2',
        u'BSD License and GNU Library or Lesser General Public License (LGPL)',
        u'GPL-2 | file LICENSE', u'BSD_3_clause + file LICENSE', u'CC0',
        u'MIT + file LICENSE | Unlimited', u'Apache License 2.0',
        u'BSD License', u'Lucent Public License'
    }

    for cens in warnings:
        fam = guess_license_family(cens)
        print('{}:{}'.format(cens, fam))
        assert fam in allowed_license_families
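The membership check above runs against conda_build.license_family.allowed_license_families. For context, at the time of these examples the list looked roughly like the sketch below — an approximation, so verify against your installed conda-build:

# Approximate contents of allowed_license_families; check your installed
# conda-build rather than relying on this sketch.
allowed_license_families = (
    'AGPL LGPL GPL3 GPL2 GPL BSD MIT APACHE PSF CC '
    'MOZILLA PUBLIC-DOMAIN PROPRIETARY OTHER NONE'
).split()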
Example #6
def test_not_gpl2():
    licenses = {'GPL (>= 2)', 'LGPL (>= 2)', 'GPL',
                'LGPL-3', 'GPL 3', 'GPL (>= 3)',
                'Apache License (== 2.0)'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam != 'GPL2'
Example #7
def test_lgpl():
    licenses = {u'GNU Lesser General Public License (LGPL)', u'LGPL-2.1',
                u'LGPL-2', u'LGPL-3', u'LGPL (>= 2)',
                u'BSD License and GNU Library or Lesser General Public License (LGPL)'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'LGPL'
Example #8
def test_gpl3():
    licenses = {'GPL 3', 'GPL-3', 'GPL-3 | file LICENSE',
                'GPL-2 | GPL-3 | file LICENSE', 'GPL (>= 3) | file LICENCE',
                'GPL (>= 2)', 'GPL-2 | GPL-3', 'GPL (>= 2) | file LICENSE'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == 'GPL3'
Example #9
def test_not_gpl2():
    licenses = {u'GPL (>= 2)', u'LGPL (>= 2)', u'GPL',
                u'LGPL-3', u'GPL 3', u'GPL (>= 3)',
                u'Apache License (== 2.0)'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam != u'GPL2'
Example #10
def test_unlimited():
    """The following is an unfortunate case where MIT is in UNLIMITED

    We could add words to filter out, but it would be hard to keep track of...
    """
    cens = u'Unlimited'
    assert guess_license_family(cens) == 'MIT'
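The docstring names the root cause: once both strings are upper-cased, the family name 'MIT' is a literal substring of 'UNLIMITED'. A toy illustration of that kind of substring check (deliberately simplified; not conda-build's actual implementation):

# Toy illustration only: a substring-based family check maps "Unlimited"
# to MIT because "MIT" occurs inside "UNLIMITED".
def toy_family_guess(license_str, families=('AGPL', 'LGPL', 'GPL', 'MIT')):
    normalized = license_str.upper()
    for family in families:
        if family in normalized:
            return family
    return 'OTHER'

assert toy_family_guess('Unlimited') == 'MIT'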
Example #11
def test_gpl2():
    licenses = {
        u'GPL-2', u'GPL-2 | file LICENSE',
        u'GNU General Public License v2 or later (GPLv2+)'
    }
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'GPL2'
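Read together with test_gpl3 and test_not_gpl2 elsewhere on this page, these cases pin down the boundary: an exact 'GPL-2' lands in GPL2, while the open-ended 'GPL (>= 2)' is guessed as GPL3. A compact restatement:

# Compact restatement of the GPL2/GPL3 boundary the tests encode.
assert guess_license_family('GPL-2') == 'GPL2'       # exact major version
assert guess_license_family('GPL (>= 2)') == 'GPL3'  # open range resolves to GPL3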
Example #12
def test_other():
    licenses = {u'file LICENSE (FOSS)',
                u'Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)',
                u'zlib (http://zlib.net/zlib_license.html)',
                u'Free software (X11 License)', u'Custom free software license'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'OTHER'
Example #13
def test_other():
    licenses = {'file LICENSE (FOSS)',
                'Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)',
                'zlib (http://zlib.net/zlib_license.html)',
                'Free software (X11 License)', 'Custom free software license'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == 'OTHER'
Example #14
def test_new_vs_previous_guesses_match():
    """Test cases where new and deprecated functions match"""

    cens = "GPL (>= 3)"
    fam = guess_license_family(cens)
    assert fam == 'GPL3'

    cens = 'GNU Lesser General Public License'
    fam = guess_license_family(cens)
    assert fam == 'LGPL', 'guess_license_family({}) is {}'.format(cens, fam)

    cens = 'GNU General Public License some stuff then a 3 then stuff'
    fam = guess_license_family(cens)
    assert fam == 'GPL3', 'guess_license_family({}) is {}'.format(cens, fam)

    cens = 'Affero GPL'
    fam = guess_license_family(cens)
    assert fam == 'AGPL', 'guess_license_family({}) is {}'.format(cens, fam)
Example #16
def test_new_vs_previous_guess_differ_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL'
    New guess is GPL, which is an allowed family, hence the most accurate.
    Previously, GPL3 was chosen over GPL
    """
    cens = "GPL"
    fam = guess_license_family(cens)
    assert fam == 'GPL'
Example #17
def test_new_vs_previous_guess_differ_multiple_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL-2 | GPL-3 | file LICENSE'
    New guess is GPL-3, which is the most accurate.
    Previously, somehow Public-Domain is closer than GPL2 or GPL3!
    """
    cens = 'GPL-2 | GPL-3 | file LICENSE'
    fam = guess_license_family(cens)
    assert fam == 'GPL3', f'guess_license_family_from_index({cens}) is {fam}'
Example #19
def test_new_vs_previous_guess_differ_multiple_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL-2 | GPL-3 | file LICENSE'
    New guess is GPL-3, which is the most accurate.
    Previously, somehow Public-Domain is closer than GPL2 or GPL3!
    """
    cens = u'GPL-2 | GPL-3 | file LICENSE'
    fam = guess_license_family(cens)
    assert fam == 'GPL3', 'guess_license_family_from_index({}) is {}'.format(cens, fam)
Example #20
def get_package_metadata(package, metadata, data, output_dir, python_version,
                         all_extras, recursive, created_recipes, noarch_python,
                         no_prompt, packages, extra_specs, config,
                         setup_options):

    print("Downloading %s" % package)
    print("PyPI URL: ", metadata['pypiurl'])
    pkginfo = get_pkginfo(package,
                          filename=metadata['filename'],
                          pypiurl=metadata['pypiurl'],
                          digest=metadata['digest'],
                          python_version=python_version,
                          extra_specs=extra_specs,
                          setup_options=setup_options,
                          config=config)

    metadata.update(get_entry_points(pkginfo))

    requires = get_requirements(package, pkginfo, all_extras=all_extras)

    if requires or is_setuptools_enabled(pkginfo):
        list_deps = get_dependencies(requires, is_setuptools_enabled(pkginfo))

        metadata['build_depends'] = ['pip'] + list_deps
        # Never add setuptools to runtime dependencies.
        metadata['run_depends'] = list_deps

        if recursive:
            packages += get_recursive_deps(created_recipes, list_deps,
                                           output_dir)

    if 'packagename' not in metadata:
        metadata['packagename'] = pkginfo['name'].lower()

    if metadata['version'] == 'UNKNOWN':
        metadata['version'] = pkginfo['version']

    metadata["import_tests"] = get_import_tests(pkginfo,
                                                metadata.get("import_tests"))
    metadata['tests_require'] = get_tests_require(pkginfo)

    metadata["home"] = get_home(pkginfo, data)

    if not metadata.get("summary"):
        metadata["summary"] = get_summary(pkginfo)

    license_name = get_license_name(package, pkginfo, no_prompt, data)
    metadata["license"] = clean_license_name(license_name)
    metadata['license_family'] = guess_license_family(
        license_name, allowed_license_families)

    if 'new_hash_value' in pkginfo:
        metadata['digest'] = pkginfo['new_hash_value']
Example #21
def test_lgpl():
    licenses = {
        u"GNU Lesser General Public License (LGPL)",
        u"LGPL-2.1",
        u"LGPL-2",
        u"LGPL-3",
        u"LGPL (>= 2)",
        u"BSD License and GNU Library or Lesser General Public License (LGPL)",
    }
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u"LGPL"
Example #22
def test_other():
    licenses = {
        u"file LICENSE (FOSS)",
        u"CC0",
        u"Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)",
        u"zlib (http://zlib.net/zlib_license.html)",
        u"Free software (X11 License)",
        u"Custom free software license",
    }
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u"OTHER"
Example #23
def test_new_vs_previous_guess_differ_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL'
    New guess is GPL, which is an allowed family, hence the most accurate.
    Previously, GPL3 was chosen over GPL
    """
    cens = "GPL"
    fam = guess_license_family(cens)
    assert fam == 'GPL'
    prev = deprecated_guess_license_family(cens)
    assert fam != prev, 'new and deprecated guesses are unexpectedly the same'
    assert prev == 'GPL3'  # bizarre when GPL is an allowed license family
Example #24
def test_new_vs_previous_guess_differ_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL'
    New guess is GPL, which is an allowed family, hence the most accurate.
    Previously, GPL3 was chosen over GPL
    """
    cens = "GPL"
    fam = guess_license_family(cens)
    assert fam == "GPL"
    prev = deprecated_guess_license_family(cens)
    assert fam != prev, "new and deprecated guesses are unexpectedly the same"
    assert prev == "GPL3"  # bizarre when GPL is an allowed license family
Example #25
def test_new_vs_previous_guess_differ_multiple_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL-2 | GPL-3 | file LICENSE'
    New guess is GPL-3, which is the most accurate.
    Previously, somehow PUBLICDOMAIN is closer than GPL2 or GPL3!
    """
    cens = u"GPL-2 | GPL-3 | file LICENSE"
    fam = guess_license_family(cens)
    assert fam == "GPL3", "guess_license_family_from_index({}) is {}".format(cens, fam)
    prev = deprecated_guess_license_family(cens)
    assert fam != prev, "new and deprecated guesses are unexpectedly the same"
    assert prev == "PUBLICDOMAIN"
Example #27
File: rpm.py  Project: cav71/conda-build
def remap_license(rpm_license):
    mapping = {'lgplv2+': 'LGPL (>= 2)',
               'gplv2+': 'GPL (>= 2)',
               'public domain (uncopyrighted)': 'Public-Domain',
               'public domain': 'Public-Domain',
               'mit/x11': 'MIT',
               'the open group license': 'The Open Group License'}
    l_rpm_license = rpm_license.lower()
    if l_rpm_license in mapping:
        license, family = mapping[l_rpm_license], guess_license_family(mapping[l_rpm_license])
    else:
        license, family = rpm_license, guess_license_family(rpm_license)
    # Yuck:
    if family == 'APACHE':
        family = 'Apache'
    elif family == 'PUBLIC-DOMAIN':
        family = 'Public-Domain'
    elif family == 'PROPRIETARY':
        family = 'Proprietary'
    elif family == 'OTHER':
        family = 'Other'
    return license, family
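A short usage sketch for remap_license; the family values shown are inferred from the mapping table above and from the test expectations elsewhere on this page (e.g. test_gpl3 maps 'GPL (>= 2)' to 'GPL3'), so treat them as assumptions:

# Usage sketch; expected outputs are assumptions, not captured output.
lic, family = remap_license('GPLv2+')
print(lic, family)  # GPL (>= 2) GPL3

lic, family = remap_license('MIT/X11')
print(lic, family)  # MIT MIT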
Example #28
def test_gpl3():
    licenses = {
        u"GPL 3",
        u"GPL-3",
        u"GPL-3 | file LICENSE",
        u"GPL-2 | GPL-3 | file LICENSE",
        u"GPL (>= 3) | file LICENCE",
        u"GPL (>= 2)",
        u"GPL-2 | GPL-3",
        u"GPL (>= 2) | file LICENSE",
    }
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u"GPL3"
Example #29
def test_new_vs_previous_guess_differ_multiple_gpl():
    """Test cases where new and deprecated functions differ

    license = 'GPL-2 | GPL-3 | file LICENSE'
    New guess is GPL-3, which is the most accurate.
    Previously, somehow PUBLICDOMAIN is closer than GPL2 or GPL3!
    """
    cens = u'GPL-2 | GPL-3 | file LICENSE'
    fam = guess_license_family(cens)
    assert fam == 'GPL3', 'guess_license_family_from_index({}) is {}'.format(
        cens, fam)
    prev = deprecated_guess_license_family(cens)
    assert fam != prev, 'new and deprecated guesses are unexpectedly the same'
    assert prev == 'PUBLICDOMAIN'
Example #30
def test_old_warnings_no_longer_fail():
    # the following previously threw warnings. Came from r/linux-64
    warnings = {
        u"MIT License",
        u"GNU Lesser General Public License (LGPL)",
        u"GPL-2 | GPL-3 | file LICENSE",
        u"GPL (>= 3) | file LICENCE",
        u"BSL-1.0",
        u"GPL (>= 2)",
        u"file LICENSE (FOSS)",
        u"Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)",
        u"MIT + file LICENSE",
        u"GPL-2 | GPL-3",
        u"GPL (>= 2) | file LICENSE",
        u"Unlimited",
        u"GPL-3 | file LICENSE",
        u"GNU General Public License v2 or later (GPLv2+)",
        u"LGPL-2.1",
        u"LGPL-2",
        u"LGPL-3",
        u"GPL",
        u"zlib (http://zlib.net/zlib_license.html)",
        u"Free software (X11 License)",
        u"Custom free software license",
        u"Old MIT",
        u"GPL 3",
        u"Apache License (== 2.0)",
        u"GPL (>= 3)",
        None,
        u"LGPL (>= 2)",
        u"BSD_2_clause + file LICENSE",
        u"GPL-3",
        u"GPL-2",
        u"BSD License and GNU Library or Lesser General Public License (LGPL)",
        u"GPL-2 | file LICENSE",
        u"BSD_3_clause + file LICENSE",
        u"CC0",
        u"MIT + file LICENSE | Unlimited",
        u"Apache License 2.0",
        u"BSD License",
        u"Lucent Public License",
    }

    for cens in warnings:
        fam = guess_license_family(cens)
        print("{}:{}".format(cens, fam))
        assert fam in allowed_license_families
Example #31
def test_old_warnings_no_longer_fail():
    # the following previously threw warnings. Came from r/linux-64
    warnings = {u'MIT License', u'GNU Lesser General Public License (LGPL)',
         u'GPL-2 | GPL-3 | file LICENSE', u'GPL (>= 3) | file LICENCE',
         u'BSL-1.0', u'GPL (>= 2)', u'file LICENSE (FOSS)',
         u'Open Source (http://www.libpng.org/pub/png/src/libpng-LICENSE.txt)',
         u'MIT + file LICENSE', u'GPL-2 | GPL-3', u'GPL (>= 2) | file LICENSE',
         u'Unlimited', u'GPL-3 | file LICENSE',
         u'GNU General Public License v2 or later (GPLv2+)', u'LGPL-2.1',
         u'LGPL-2', u'LGPL-3', u'GPL',
         u'zlib (http://zlib.net/zlib_license.html)',
         u'Free software (X11 License)', u'Custom free software license',
         u'Old MIT', u'GPL 3', u'Apache License (== 2.0)', u'GPL (>= 3)', None,
         u'LGPL (>= 2)', u'BSD_2_clause + file LICENSE', u'GPL-3', u'GPL-2',
         u'BSD License and GNU Library or Lesser General Public License (LGPL)',
         u'GPL-2 | file LICENSE', u'BSD_3_clause + file LICENSE', u'CC0',
         u'MIT + file LICENSE | Unlimited', u'Apache License 2.0',
         u'BSD License', u'Lucent Public License'}

    for cens in warnings:
        fam = guess_license_family(cens)
        print('{}:{}'.format(cens, fam))
        assert fam in allowed_license_families
Example #32
def get_package_metadata(package, d, data, output_dir, python_version,
                         all_extras, recursive, created_recipes, noarch_python,
                         noprompt, packages, extra_specs, config,
                         setup_options):

    print("Downloading %s" % package)
    print("PyPI URL: ", d['pypiurl'])
    pkginfo = get_pkginfo(package,
                          filename=d['filename'],
                          pypiurl=d['pypiurl'],
                          digest=d['digest'],
                          python_version=python_version,
                          extra_specs=extra_specs,
                          setup_options=setup_options,
                          config=config)

    setuptools_run = False

    # Look at the entry_points and construct console_script and
    #  gui_scripts entry_points for conda
    entry_points = pkginfo.get('entry_points', [])
    if entry_points:
        if isinstance(entry_points, str):
            # makes sure it is left-shifted
            newstr = "\n".join(x.strip() for x in entry_points.splitlines())
            _config = configparser.ConfigParser()
            entry_points = {}
            try:
                _config.readfp(StringIO(newstr))
            except Exception as err:
                print("WARNING: entry-points not understood: ", err)
                print("The string was", newstr)
                entry_points = pkginfo['entry_points']
            else:
                setuptools_run = True
                for section in _config.sections():
                    if section in ['console_scripts', 'gui_scripts']:
                        value = [
                            '%s=%s' % (option, _config.get(section, option))
                            for option in _config.options(section)
                        ]
                        entry_points[section] = value
        if not isinstance(entry_points, dict):
            print("WARNING: Could not add entry points. They were:")
            print(entry_points)
        else:
            cs = entry_points.get('console_scripts', [])
            gs = entry_points.get('gui_scripts', [])
            if isinstance(cs, string_types):
                cs = [cs]
            elif cs and isinstance(cs, list) and isinstance(cs[0], list):
                # We can have lists of lists here
                cs = [item for sublist in [s for s in cs] for item in sublist]
            if isinstance(gs, string_types):
                gs = [gs]
            elif gs and isinstance(gs, list) and isinstance(gs[0], list):
                gs = [item for sublist in [s for s in gs] for item in sublist]
            # We have *other* kinds of entry-points so we need
            # setuptools at run-time
            if set(entry_points.keys()) - {'console_scripts', 'gui_scripts'}:
                setuptools_run = True
            # TODO: Use pythonw for gui scripts
            entry_list = (cs + gs)
            if len(cs + gs) != 0:
                d['entry_points'] = entry_list
                d['test_commands'] = make_entry_tests(entry_list)

    requires = get_requirements(package, pkginfo, all_extras=all_extras)

    if requires or setuptools_run:
        deps = []
        if setuptools_run:
            deps.append('setuptools')
        for deptext in requires:
            if isinstance(deptext, string_types):
                deptext = deptext.splitlines()
            # Every item may be a single requirement
            #  or a multiline requirements string...
            for dep in deptext:
                # ... and may also contain comments...
                dep = dep.split('#')[0].strip()
                if dep:  # ... and empty (or comment only) lines
                    dep, marker = parse_dep_with_env_marker(dep)
                    spec = spec_from_line(dep)
                    if spec is None:
                        sys.exit("Error: Could not parse: %s" % dep)
                    if '~' in spec:
                        version = spec.split()[-1]
                        tilde_version = '~ {}'.format(version)
                        pin_compatible = convert_version(version)
                        spec = spec.replace(tilde_version, pin_compatible)
                    if marker:
                        spec = ' '.join((spec, marker))
                    deps.append(spec)

        if 'setuptools' in deps:
            setuptools_run = False
        d['build_depends'] = ['pip'] + deps
        # Never add setuptools to runtime dependencies.
        d['run_depends'] = deps

        if recursive:
            for dep in deps:
                dep = dep.split()[0]
                if not exists(join(output_dir, dep)):
                    if dep not in created_recipes:
                        packages.append(dep)

    if 'packagename' not in d:
        d['packagename'] = pkginfo['name'].lower()
    if d['version'] == 'UNKNOWN':
        d['version'] = pkginfo['version']

    if pkginfo.get('packages'):
        deps = set(pkginfo['packages'])
        if d['import_tests']:
            if not d['import_tests'] or d['import_tests'] == 'PLACEHOLDER':
                olddeps = []
            else:
                olddeps = [x for x in d['import_tests'].split() if x != '-']
            deps = set(olddeps) | deps
        d['import_tests'] = sorted(deps)

        d['tests_require'] = sorted([
            spec_from_line(pkg)
            for pkg in ensure_list(pkginfo['tests_require'])
        ])

    if pkginfo.get('home'):
        d['home'] = pkginfo['home']
    else:
        if data and 'home' in data:
            d['home'] = data['home']
        else:
            d['home'] = "The package home page"

    if pkginfo.get('summary'):
        if 'summary' in d and not d['summary']:
            # Need something here, use what the package had
            d['summary'] = pkginfo['summary']
    else:
        d['summary'] = "Summary of the package"

    license_classifier = "License :: OSI Approved :: "
    if pkginfo.get('classifiers'):
        licenses = [
            classifier.split(license_classifier, 1)[1]
            for classifier in pkginfo['classifiers']
            if classifier.startswith(license_classifier)
        ]
    elif data and 'classifiers' in data:
        licenses = [
            classifier.split(license_classifier, 1)[1]
            for classifier in data['classifiers']
            if classifier.startswith(license_classifier)
        ]
    else:
        licenses = []
    if not licenses:
        if pkginfo.get('license'):
            license_name = pkginfo['license']
        elif data and 'license' in data:
            license_name = data['license']
        else:
            license_name = None
        if license_name:
            if noprompt:
                pass
            elif '\n' not in license_name:
                print('Using "%s" for the license' % license_name)
            else:
                # Some projects put the whole license text in this field
                print("This is the license for %s" % package)
                print()
                print(license_name)
                print()
                license_name = input("What license string should I use? ")
        else:
            if noprompt:
                license_name = "UNKNOWN"
            else:
                license_name = input(
                    ("No license could be found for %s on " +
                     "PyPI or in the source. What license should I use? ") %
                    package)
    else:
        license_name = ' or '.join(licenses)
    # strip the trailing word "license" from the license name
    clean_license_name = re.subn(r'(.*)\s+license',
                                 r'\1',
                                 license_name,
                                 flags=re.IGNORECASE)[0]
    d['license'] = clean_license_name
    d['license_family'] = guess_license_family(license_name,
                                               allowed_license_families)
    if 'new_hash_value' in pkginfo:
        d['digest'] = pkginfo['new_hash_value']
Example #33
def test_mit():
    licenses = {u"MIT License", u"MIT + file LICENSE", u"Old MIT"}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u"MIT"
Example #34
def test_cc():
    fam = guess_license_family(u'CC0')
    assert fam == u'CC'
Example #35
def main():

    is_github_url = False

    this_dir = os.getcwd()

    # Unpack
    config = Config()
    cran_metadata = {}
    # Some packages are missing on some systems. Need to mark them so they get skipped.
    to_be_packaged = set()
    with TemporaryDirectory() as merged_td:
        for platform, details in sources.items():
            with TemporaryDirectory() as td:
                os.chdir(td)
                libdir = None
                # libarchive cannot handle the .exe, just skip it. Means we cannot figure out packages that are not available
                # for Windows.
                if platform == 'win_no':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    libarchive.extract_file(details['cached_as'])
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                elif platform == 'linux':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    libarchive.extract_file(details['cached_as'])
                    import glob
                    for filename in glob.iglob('**/*.rpm', recursive=True):
                        print(filename)
                        libarchive.extract_file(filename)
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                elif platform == 'mac':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    os.system("bsdtar -xf {}".format(details['cached_as']))
                    payloads = glob.glob('./**/Payload', recursive=True)
                    print(payloads)
                    for payload in payloads:
                        libarchive.extract_file(payload)
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                if libdir:
                    distutils.dir_util.copy_tree(libdir, merged_td)
        os.chdir(merged_td)
        libdir = merged_td
        # Fudge until we can unpack the Windows installer .exe on Linux
        sources['win']['to_be_packaged'] = sources['linux']['to_be_packaged']

        # Get the superset of all packages (note we will no longer have the DESCRIPTION?!)
        for platform, details in sources.items():
            if 'to_be_packaged' in details:
                to_be_packaged.update(details['to_be_packaged'])

        package_dicts = {}
        for package in sorted(list(to_be_packaged)):
            p = os.path.join(libdir, package, "DESCRIPTION")
            with open(p) as cran_description:
                description_text = cran_description.read()
                d = dict_from_cran_lines(
                    remove_package_line_continuations(
                        description_text.splitlines()))
                d['orig_description'] = description_text
                package = d['Package'].lower()
                cran_metadata[package] = d

            # Make sure package always uses the CRAN capitalization
            package = cran_metadata[package.lower()]['Package']
            package_dicts[package.lower()] = {}
            package_dicts[package.lower()]['osx'] = package in sources['mac']['to_be_packaged']
            package_dicts[package.lower()]['win'] = package in sources['win']['to_be_packaged']
            package_dicts[package.lower()]['linux'] = package in sources['linux']['to_be_packaged']
        for package in sorted(list(to_be_packaged)):
            cran_package = cran_metadata[package.lower()]

            package_dicts[package.lower()].update({
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'patches': '',
                'build_number': 0,
                'build_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })
            d = package_dicts[package.lower()]
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''
            d['build'], d['skip'] = build_and_skip_olw[(
                package_dicts[package.lower()]['osx'],
                package_dicts[package.lower()]['linux'],
                package_dicts[package.lower()]['win'])]
            d['cran_version'] = cran_package['Version']
            # Conda versions cannot have -. Conda (verlib) will treat _ as a .
            d['conda_version'] = d['cran_version'].replace('-', '_')

            patches = []
            script_env = []
            extra_recipe_maintainers = []
            build_number = 0
            if not len(patches):
                patches.append("# patches:\n")
                patches.append("   # List any patch files here\n")
                patches.append("   # - fix.patch")
            if len(extra_recipe_maintainers):
                # Slice assignment: a bare [1:].sort() would sort a throwaway copy.
                extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
                extra_recipe_maintainers.insert(0, "extra:\n  ")
            d['build_number'] = build_number

            cached_path = None
            d['cran_metadata'] = '\n'.join(
                ['# %s' % l for l in cran_package['orig_lines'] if l])

            # XXX: We should maybe normalize these
            d['license'] = cran_package.get("License", "None")
            d['license_family'] = guess_license_family(
                d['license'], allowed_license_families)

            if 'License_is_FOSS' in cran_package:
                d['license'] += ' (FOSS)'
            if cran_package.get('License_restricts_use') == 'yes':
                d['license'] += ' (Restricts use)'

            if "URL" in cran_package:
                d['home_comment'] = ''
                d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
            else:
                # use CRAN page as homepage if nothing has been specified
                d['home_comment'] = ''
                d['homeurl'] = ' https://mran.microsoft.com/package/{}'.format(
                    package)

            if 'Description' in cran_package:
                d['summary_comment'] = ''
                d['summary'] = ' ' + yaml_quote_string(
                    cran_package['Description'], indent=6)

            if "Suggests" in cran_package:
                d['suggests'] = "    # Suggests: %s" % cran_package['Suggests']
            else:
                d['suggests'] = ''

            if package.lower() == 'revoutilsmath':
                d['always_include_files'] = "      always_include_files:\n" \
                                            "        - lib/R/lib/libRblas.so  # [linux]"
            else:
                d['always_include_files'] = ''

            # Every package depends on at least R.
            # I'm not sure what the difference between depends and imports is.
            depends = [
                s.strip() for s in cran_package.get('Depends', '').split(',')
                if s.strip()
            ]
            imports = [
                s.strip() for s in cran_package.get('Imports', '').split(',')
                if s.strip()
            ]
            links = [
                s.strip() for s in cran_package.get("LinkingTo", '').split(',')
                if s.strip()
            ]

            dep_dict = {}

            seen = set()
            for s in list(chain(imports, depends, links)):
                match = VERSION_DEPENDENCY_REGEX.match(s)
                if not match:
                    sys.exit(
                        "Could not parse version from dependency of %s: %s" %
                        (package, s))
                name = match.group('name')
                if name in seen:
                    continue
                seen.add(name)
                archs = match.group('archs')
                relop = match.group('relop') or ''
                ver = match.group('version') or ''
                ver = ver.replace('-', '_')
                # If there is a relop there should be a version
                assert not relop or ver

                if archs:
                    sys.exit(
                        "Don't know how to handle archs from dependency of "
                        "package %s: %s" % (package, s))

                dep_dict[name] = '{relop}{version}'.format(relop=relop,
                                                           version=ver)

            if 'R' not in dep_dict:
                dep_dict['R'] = ''

            need_git = is_github_url
            if cran_package.get("NeedsCompilation", 'no') == 'yes' and False:
                with tarfile.open(cached_path) as tf:
                    need_f = any([
                        f.name.lower().endswith(('.f', '.f90', '.f77'))
                        for f in tf
                    ])
                    # Fortran builds use CC to perform the link (they do not call the linker directly).
                    need_c = True if need_f else \
                        any([f.name.lower().endswith('.c') for f in tf])
                    need_cxx = any([
                        f.name.lower().endswith(
                            ('.cxx', '.cpp', '.cc', '.c++')) for f in tf
                    ])
                    need_autotools = any(
                        [f.name.lower().endswith('/configure') for f in tf])
                    need_make = True if any((need_autotools, need_f, need_cxx, need_c)) else \
                        any([f.name.lower().endswith(('/makefile', '/makevars'))
                             for f in tf])
            else:
                need_c = need_cxx = need_f = need_autotools = need_make = False
            for dep_type in ['build', 'run']:

                deps = []
                # Put non-R dependencies first.
                if dep_type == 'build':
                    if need_c:
                        deps.append(
                            "{indent}{{{{ compiler('c') }}}}        # [not win]"
                            .format(indent=INDENT))
                    if need_cxx:
                        deps.append(
                            "{indent}{{{{ compiler('cxx') }}}}      # [not win]"
                            .format(indent=INDENT))
                    if need_f:
                        deps.append(
                            "{indent}{{{{ compiler('fortran') }}}}  # [not win]"
                            .format(indent=INDENT))
                    if need_c or need_cxx or need_f:
                        deps.append(
                            "{indent}{{{{native}}}}toolchain        # [win]".
                            format(indent=INDENT))
                    if need_autotools or need_make or need_git:
                        deps.append(
                            "{indent}{{{{posix}}}}filesystem        # [win]".
                            format(indent=INDENT))
                    if need_git:
                        deps.append(
                            "{indent}{{{{posix}}}}git".format(indent=INDENT))
                    if need_autotools:
                        deps.append(
                            "{indent}{{{{posix}}}}sed               # [win]".
                            format(indent=INDENT))
                        deps.append(
                            "{indent}{{{{posix}}}}grep              # [win]".
                            format(indent=INDENT))
                        deps.append("{indent}{{{{posix}}}}autoconf".format(
                            indent=INDENT))
                        deps.append("{indent}{{{{posix}}}}automake".format(
                            indent=INDENT))
                        deps.append("{indent}{{{{posix}}}}pkg-config".format(
                            indent=INDENT))
                    if need_make:
                        deps.append(
                            "{indent}{{{{posix}}}}make".format(indent=INDENT))
                elif dep_type == 'run':
                    if need_c or need_cxx or need_f:
                        deps.append(
                            "{indent}{{{{native}}}}gcc-libs         # [win]".
                            format(indent=INDENT))

                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first
                        # Regardless of build or run, and whether this is a recommended package or not,
                        # it can only depend on 'r-base' since anything else can and will cause cycles
                        # in the dependency graph. The cran metadata lists all dependencies anyway, even
                        # those packages that are in the recommended group.
                        # r_name = 'r-base ' + VERSION
                        # We don't include any R version restrictions because we
                        # always build R packages against an exact R version
                        # deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_name))
                        # Bit of a hack since I added overlinking checking.
                        r_name = 'mro-base ' + VERSION
                        deps.insert(
                            0, '{indent}{r_name}'.format(indent=INDENT,
                                                         r_name=r_name))
                    else:
                        conda_name = 'r-' + name.lower()

                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(
                                name=conda_name,
                                version=dep_dict[name],
                                indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(
                                name=conda_name, indent=INDENT))

                d['dep_dict'] = dep_dict  # We need this for (1)

                # Make pin_subpackage from the deps. Done separately so the above is the same as conda-build's
                # CRAN skeleton (so it is easy to refactor CRAN skeleton so it can be reused here later).
                for i, dep in enumerate(deps):
                    groups = re.match(r'(\n.* - )([\w-]+) ?([>=\w0-9._]+)?',
                                      dep, re.MULTILINE)
                    indent = groups.group(1)
                    name = groups.group(2)
                    pinning = groups.group(3)
                    if pinning:
                        if '>=' in pinning:
                            deps[i] = "{}{{{{ pin_subpackage('{}', min_pin='{}', max_pin=None) }}}}".format(
                                indent, name, pinning.replace('>=', ''))
                        elif name == 'mro-base':
                            # We end up with filenames with 'r34*' in them unless we specify the version here.
                            # TODO :: Ask @msarahan about this.
                            deps[i] = "{}{} {{{{ version }}}}".format(indent, name)
                        else:
                            deps[i] = "{}{{{{ pin_subpackage('{}', min_pin='{}', max_pin='{}') }}}}".format(
                                indent, name, pinning, pinning)
                    else:
                        deps[i] = "{}{{{{ pin_subpackage('{}', min_pin='x.x.x.x.x.x', max_pin='x.x.x.x.x.x') }}}}".format(
                            indent, name)

                # Add missing conda package dependencies.
                if dep_type == 'run':
                    if d['packagename'] in extra_deps:
                        for extra_dep in extra_deps[d['packagename']]:
                            print("extra_dep is {}".format(extra_dep))
                            deps.append(extra_dep)
                        print(deps)

                d['%s_depends' % dep_type] = ''.join(deps)

        template = {
            'version': VERSION,
            'win_url': sources['win']['url'],
            'win_fn': sources['win']['fn'],
            'win_sha': sources['win']['sha'],
            'linux_url': sources['linux']['url'],
            'linux_fn': sources['linux']['fn'],
            'linux_sha': sources['linux']['sha'],
            'mac_url': sources['mac']['url'],
            'mac_fn': sources['mac']['fn'],
            'mac_sha': sources['mac']['sha']
        }

        with open(os.path.join(this_dir, 'meta.yaml'), 'w') as meta_yaml:
            meta_yaml.write(HEADER.format(**template))
            meta_yaml.write(BASE_PACKAGE)

            for package in package_dicts:
                d = package_dicts[package]

                # Normalize the metadata values
                d = {
                    k: unicodedata.normalize("NFKD", text_type(v)).encode(
                        'ascii', 'ignore').decode()
                    for k, v in iteritems(d)
                }

                meta_yaml.write(PACKAGE.format(**d))

            meta_yaml.write(MRO_BASICS_METAPACKAGE)
            meta_subs = []
            for package in package_dicts:
                meta_subs.append('        - {}{}'.format(
                    package_dicts[package]['packagename'],
                    package_dicts[package]['build']))
            meta_yaml.write('\n'.join(sorted(meta_subs)))

        with open(os.path.join(this_dir, 'build.sh'), 'w') as build_sh:
            build_sh.write(BUILD_SH.format(**template))

        with open(os.path.join(this_dir, 'install-mro-base.sh'),
                  'w') as install_mro_base:
            install_mro_base.write(INSTALL_MRO_BASE_HEADER.format(**template))
            for excluded in sorted(to_be_packaged, key=lambda s: s.lower()):
                install_mro_base.write('EXCLUDED_PACKAGES+=(' + excluded +
                                       ')\n')
            install_mro_base.write(INSTALL_MRO_BASE_FOOTER.format(**template))

        with open(os.path.join(this_dir, 'install-r-package.sh'),
                  'w') as install_r_package:
            install_r_package.write(INSTALL_R_PACKAGE.format(**template))
Example #36
def test_gpl2():
    licenses = {u"GPL-2", u"GPL-2 | file LICENSE", u"GNU General Public License v2 or later (GPLv2+)"}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u"GPL2"
Example #37
def test_not_gpl2():
    licenses = {u"GPL (>= 2)", u"LGPL (>= 2)", u"GPL", u"LGPL-3", u"GPL 3", u"GPL (>= 3)", u"Apache License (== 2.0)"}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam != u"GPL2"
Example #38
File: cran.py  Project: cav71/conda-build
def skeletonize(in_packages, output_dir=".", output_suffix="", add_maintainer=None, version=None,
                git_tag=None, cran_url="https://cran.r-project.org", recursive=False, archive=True,
                version_compare=False, update_policy='', r_interp='r-base', use_binaries_ver=None,
                use_noarch_generic=False, use_rtools_win=False, config=None):

    output_dir = realpath(output_dir)

    if not config:
        config = Config()

    if len(in_packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    package_list = []

    cran_url = cran_url.rstrip('/')
    cran_metadata = get_cran_metadata(cran_url, output_dir)

    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)

    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix, git_tag, package)
        if inputs_dict:
            package_dicts.update({inputs_dict['pkg-name']: {'inputs': inputs_dict}})

    for package_name, package_dict in package_dicts.items():
        package_list.append(package_name)

    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        is_tarfile = location and isfile(location) and tarfile.is_tarfile(location)
        url = inputs['location']

        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))

        # Bodges GitHub packages into cran_metadata
        if is_github_url or is_tarfile:
            rm_rf(config.work_dir)
            if is_github_url:
                m = metadata.MetaData.fromdict({'source': {'git_url': location}}, config=config)
                source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
                new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
                p = subprocess.Popen(['git', 'checkout', new_git_tag], stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, cwd=config.work_dir)
                stdout, stderr = p.communicate()
                stdout = stdout.decode('utf-8')
                stderr = stderr.decode('utf-8')
                if p.returncode:
                    sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                             (new_git_tag, stderr.strip()))
                if stdout:
                    print(stdout, file=sys.stdout)
                if stderr:
                    print(stderr, file=sys.stderr)
            else:
                m = metadata.MetaData.fromdict({'source': {'url': location}}, config=config)
                source.unpack(m.get_section('source'), m.config.work_dir, m.config.src_cache,
                              output_dir, m.config.work_dir)
            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, location.split('/')[-1], "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)"
                                 % (location, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name

        if pkg_name not in cran_metadata:
            sys.exit("Package %s not found" % pkg_name)

        # Make sure package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url and not is_tarfile:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(
                get_package_metadata(cran_url, package, session))

        cran_package = cran_metadata[package.lower()]

        package_dicts[package.lower()].update(
            {
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'patches': '',
                'build_number': 0,
                'build_depends': '',
                'host_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })
        d = package_dicts[package.lower()]
        d['binary1'] = ''
        d['binary2'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers', add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(indent=INDENT,
                                                               add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not len(extra_recipe_maintainers):
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array({}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if len(extra_recipe_maintainers):
            # Slice assignment: a bare [1:].sort() would sort a throwaway copy.
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n  ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number

        cached_path = None
        cran_layout = {'source': {'selector': '{others}',
                                  'dir': 'src/contrib/',
                                  'ext': '.tar.gz',
                                  # If we had platform filters we would change this to:
                                  # build_for_linux or is_github_url or is_tarfile
                                  'use_this': True},
                       'win-64': {'selector': 'win64',
                                  'dir': 'bin/windows/contrib/{}/'.format(use_binaries_ver),
                                  'ext': '.zip',
                                  'use_this': True if use_binaries_ver else False},
                       'osx-64': {'selector': 'osx',
                                  'dir': 'bin/macosx/el-capitan/contrib/{}/'.format(
                                      use_binaries_ver),
                                  'ext': '.tgz',
                                  'use_this': True if use_binaries_ver else False}}
        available = {}
        for archive_type, archive_details in iteritems(cran_layout):
            contrib_url = ''
            if archive_details['use_this']:
                if is_tarfile:
                    filename = basename(location)
                    contrib_url = relpath(location, dir_path)
                    contrib_url_rendered = package_url = contrib_url
                    sha256 = hashlib.sha256()
                    cached_path = location
                elif not is_github_url:
                    filename = '{}_{}'.format(package, d['cran_version']) + archive_details['ext']
                    contrib_url = '{{{{ cran_mirror }}}}/{}'.format(archive_details['dir'])
                    contrib_url_rendered = cran_url + '/{}'.format(archive_details['dir'])
                    package_url = contrib_url_rendered + filename
                    sha256 = hashlib.sha256()
                    print("Downloading {} from {}".format(archive_type, package_url))
                    # We may need to inspect the file later to determine which compilers are needed.
                    cached_path, _ = source.download_to_cache(
                        config.src_cache, '',
                        {'url': package_url, 'fn': archive_type + '-' + filename})
                available_details = {}
                available_details['selector'] = archive_details['selector']
                if cached_path:
                    with open(cached_path, 'rb') as hashed_file:
                        sha256.update(hashed_file.read())
                    available_details['filename'] = filename
                    available_details['contrib_url'] = contrib_url
                    available_details['contrib_url_rendered'] = contrib_url_rendered
                    available_details['package_url'] = package_url
                    available_details['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())
                    available_details['cached_path'] = cached_path
                # This is rubbish; d[] should be renamed global[] and should be
                #      merged into source and binaryN.
                if archive_type == 'source':
                    if is_github_url:
                        available_details['url_key'] = ''
                        available_details['fn_key'] = ''
                        available_details['git_url_key'] = 'git_url:'
                        available_details['git_tag_key'] = 'git_tag:'
                        hash_msg = '# You can add a hash for the file here, (md5, sha1 or sha256)'
                        available_details['hash_entry'] = hash_msg
                        available_details['filename'] = ''
                        available_details['cranurl'] = ''
                        available_details['git_url'] = url
                        available_details['git_tag'] = new_git_tag
                        available_details['archive_keys'] = ''
                    else:
                        available_details['url_key'] = 'url:'
                        available_details['fn_key'] = 'fn:'
                        available_details['git_url_key'] = ''
                        available_details['git_tag_key'] = ''
                        available_details['cranurl'] = ' ' + contrib_url + filename
                        available_details['git_url'] = ''
                        available_details['git_tag'] = ''
                available_details['patches'] = d['patches']
                available[archive_type] = available_details

        # Figure out the selectors according to what is available.
        _all = ['linux', 'win32', 'win64', 'osx']
        from_source = _all[:]
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type != 'source':
                sel = archive_details['selector']
                from_source.remove(sel)
                binary_id += 1
            else:
                for k, v in iteritems(archive_details):
                    d[k] = v
        if from_source == _all:
            sel_src = ""
            sel_src_and_win = '  # [win]'
            sel_src_not_win = '  # [not win]'
        else:
            sel_src = '  # [' + ' or '.join(from_source) + ']'
            sel_src_and_win = '  # [' + ' or '.join(fs for fs in from_source if
                                                    fs.startswith('win')) + ']'
            sel_src_not_win = '  # [' + ' or '.join(fs for fs in from_source if not
                                                    fs.startswith('win')) + ']'
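        # Illustrative: if only a win-64 binary is available, from_source is
        # ['linux', 'win32', 'osx'], so sel_src is '  # [linux or win32 or osx]',
        # sel_src_and_win is '  # [win32]' and sel_src_not_win is '  # [linux or osx]'.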

        d['sel_src'] = sel_src
        d['sel_src_and_win'] = sel_src_and_win
        d['sel_src_not_win'] = sel_src_not_win

        if 'source' in available:
            available_details = available['source']
            available_details['sel'] = sel_src
            filename = available_details['filename']
            if 'contrib_url' in available_details:
                contrib_url = available_details['contrib_url']
                if archive:
                    if is_tarfile:
                        available_details['cranurl'] = (INDENT + contrib_url)
                    else:
                        available_details['cranurl'] = (INDENT + contrib_url +
                            filename + sel_src + INDENT + contrib_url +
                            'Archive/{}/'.format(package) + filename + sel_src)
                else:
                    available_details['cranurl'] = ' ' + contrib_url + filename + sel_src
            if not is_github_url:
                available_details['archive_keys'] = '{fn_key} {filename}{sel}\n' \
                                                    '  {url_key}{sel}' \
                                                    '    {cranurl}\n' \
                                                    '  {hash_entry}{sel}'.format(
                    **available_details)

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
            cran_package['orig_lines'] if l])

        # Render the source and binaryN keys
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type == 'source':
                d['source'] = SOURCE_META.format(**archive_details)
            else:
                archive_details['sel'] = '  # [' + archive_details['selector'] + ']'
                d['binary' + str(binary_id)] = BINARY_META.format(**archive_details)
                binary_id += 1
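        # Illustrative: with a single osx-64 binary available, d['binary1'] is
        # rendered from BINARY_META with the selector comment '  # [osx]'.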

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # use CRAN page as homepage if nothing has been specified
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if not use_noarch_generic or cran_package.get("NeedsCompilation", 'no') == 'yes':
            d['noarch_generic'] = ''
        else:
            d['noarch_generic'] = 'noarch: generic'
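        # Illustrative: a pure-R package (NeedsCompilation: no) skeletonized with
        # use_noarch_generic=True gets 'noarch: generic' in its recipe.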

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends',
            '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports',
            '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo",
            '').split(',') if s.strip()]

        dep_dict = {}

        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                    (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''
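        # Illustrative: a DESCRIPTION line 'Depends: R (>= 3.1.0), methods' ends up
        # as dep_dict == {'R': '>=3.1.0', 'methods': ''}.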

        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(available['source']['cached_path']) as tf:
                need_f = any(f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf)
                # Fortran builds use CC to perform the link (they do not call the linker directly).
                need_c = need_f or any(f.name.lower().endswith('.c') for f in tf)
                need_cxx = any(f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                               for f in tf)
                need_autotools = any(f.name.lower().endswith('/configure') for f in tf)
                need_make = (any((need_autotools, need_f, need_cxx, need_c)) or
                             any(f.name.lower().endswith(('/makefile', '/makevars'))
                                 for f in tf))
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False
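        # Illustrative: a source tarball containing src/foo.c and a top-level
        # configure script sets need_c, need_autotools and need_make to True,
        # while need_f and need_cxx stay False.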

        if 'Rcpp' in dep_dict or 'RcppArmadillo' in dep_dict:
            need_cxx = True

        if need_cxx:
            need_c = True

        for dep_type in ['build', 'host', 'run']:

            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append("{indent}{{{{ compiler('c') }}}}        {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_cxx:
                    deps.append("{indent}{{{{ compiler('cxx') }}}}      {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_f:
                    deps.append("{indent}{{{{ compiler('fortran') }}}}  {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if use_rtools_win:
                    need_c = need_cxx = need_f = need_autotools = need_make = False
                    deps.append("{indent}{{{{native}}}}rtools        {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{native}}}}extsoft       {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}toolchain        {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_autotools or need_make or need_git:
                    deps.append("{indent}{{{{posix}}}}filesystem        {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_git:
                    deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append("{indent}{{{{posix}}}}sed               {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}grep              {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}autoconf          {sel}".format(
                        indent=INDENT, sel=sel_src))
                    deps.append("{indent}{{{{posix}}}}automake-wrapper  {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}automake          {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
                if need_make:
                    deps.append("{indent}{{{{posix}}}}make              {sel}".format(
                        indent=INDENT, sel=sel_src))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}gcc-libs         {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))

            if dep_type == 'host' or dep_type == 'run':
                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first
                        # Regardless of build or run, and whether this is a
                        # recommended package or not, it can only depend on
                        # r_interp since anything else can and will cause
                        # cycles in the dependency graph. The cran metadata
                        # lists all dependencies anyway, even those packages
                        # that are in the recommended group.
                        # We don't include any R version restrictions because
                        # conda-build always pins r-base and mro-base version.
                        deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_interp))
                    else:
                        conda_name = 'r-' + name.lower()

                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(name=conda_name,
                                version=dep_dict[name], indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(name=conda_name,
                                indent=INDENT))
                        if recursive:
                            lower_name = name.lower()
                            if lower_name not in package_dicts:
                                inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
                                                                     git_tag, lower_name)
                                assert lower_name == inputs_dict['pkg-name'], \
                                    "name %s != inputs_dict['pkg-name'] %s" % (
                                        name, inputs_dict['pkg-name'])
                                assert lower_name not in package_list
                                package_dicts.update({lower_name: {'inputs': inputs_dict}})
                                package_list.append(lower_name)

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s" % dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
        elif update_policy == 'skip-up-to-date' and up_to_date(cran_metadata,
                                                               d['inputs']['old-metadata']):
            continue
        elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
            continue

        # Normalize the metadata values
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}
        try:
            makedirs(dir_path)
        except OSError:
            # The directory may already exist.
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                # 'all' here was the builtin function (always unequal to a list);
                # compare against the platform list instead.
                if from_source == _all:
                    f.write(CRAN_BUILD_SH_SOURCE.format(**d))
                elif from_source == []:
                    f.write(CRAN_BUILD_SH_BINARY.format(**d))
                else:
                    tpbt = [target_platform_bash_test_by_sel[t] for t in from_source]
                    d['source_pf_bash'] = ' || '.join(['[[ $target_platform ' + s + ' ]]'
                                                  for s in tpbt])
                    f.write(CRAN_BUILD_SH_MIXED.format(**d))

        if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                if len([fs for fs in from_source if fs.startswith('win')]) == 2:
                    f.write(CRAN_BLD_BAT_SOURCE.format(**d))
                else:
                    f.write(CRAN_BLD_BAT_MIXED.format(**d))
Example #39
def skeletonize(packages, output_dir=".", version=None, git_tag=None,
                cran_url="https://cran.r-project.org/", recursive=False, archive=True,
                version_compare=False, update_outdated=False, config=None):

    if not config:
        config = Config()

    if len(packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if not update_outdated and not packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}

    cran_metadata = get_cran_metadata(cran_url, output_dir)

    if update_outdated:
        packages = get_outdated(output_dir, cran_metadata, packages)
        for pkg in packages:
            # output_dir is a single path string; indexing it ([0]) would take its
            # first character rather than the directory.
            rm_rf(join(output_dir, 'r-' + pkg))

    while packages:
        package = packages.pop()

        is_github_url = 'github.com' in package
        url = package

        if is_github_url:
            rm_rf(config.work_dir)
            m = metadata.MetaData.fromdict({'source': {'git_url': package}}, config=config)
            source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
            git_tag = git_tag[0] if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(['git', 'checkout', git_tag], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, cwd=config.work_dir)
            stdout, stderr = p.communicate()
            stdout = stdout.decode('utf-8')
            stderr = stderr.decode('utf-8')
            if p.returncode:
                sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                         (git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, package.split('/')[-1], "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)"
                                 % (package, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d

        if package.startswith('r-'):
            package = package[2:]
        if package.endswith('/'):
            package = package[:-1]
        if package.lower() not in cran_metadata:
            sys.exit("Package %s not found" % package)

        # Make sure the package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url,
            package, session))

        dir_path = join(output_dir, 'r-' + package.lower())
        if exists(dir_path) and not version_compare:
            raise RuntimeError("directory already exists: %s" % dir_path)

        cran_package = cran_metadata[package.lower()]

        d = package_dicts.setdefault(package,
            {
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'build_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })

        if is_github_url:
            d['url_key'] = ''
            d['fn_key'] = ''
            d['git_url_key'] = 'git_url:'
            d['git_tag_key'] = 'git_tag:'
            d['hash_entry'] = '# You can add a hash for the file here, like md5, sha1 or sha256'
            d['filename'] = ''
            d['cranurl'] = ''
            d['git_url'] = url
            d['git_tag'] = git_tag
        else:
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot contain '-'; conda's verlib treats '_' as '.'.
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        if not is_github_url:
            filename = '{}_{}.tar.gz'
            contrib_url = cran_url + 'src/contrib/'
            package_url = contrib_url + filename.format(package, d['cran_version'])

            # calculate sha256 by downloading source
            sha256 = hashlib.sha256()
            print("Downloading source from {}".format(package_url))
            sha256.update(urlopen(package_url).read())
            d['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())

            d['filename'] = filename.format(package, '{{ version }}')
            if archive:
                d['cranurl'] = (INDENT + contrib_url +
                    d['filename'] + INDENT + contrib_url +
                    'Archive/{}/'.format(package) + d['filename'])
            else:
                d['cranurl'] = ' ' + cran_url + 'src/contrib/' + d['filename']
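            # Illustrative: with archive=True the recipe lists two URLs, so the
            # download can fall back to src/contrib/Archive/<package>/ once the
            # current release is rotated out of src/contrib/.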

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
            cran_package['orig_lines'] if l])

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # use CRAN page as homepage if nothing has been specified
            d['home_comment'] = ''
            d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends',
            '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports',
            '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo",
            '').split(',') if s.strip()]

        dep_dict = {}

        for s in set(chain(depends, imports, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                    (package, s))
            name = match.group('name')
            archs = match.group('archs')
            relop = match.group('relop') or ''
            # Use a local 'ver' so the 'version' argument of skeletonize() is not shadowed.
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        for dep_type in ['build', 'run']:
            deps = []
            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == 'R':
                    # Put R first
                    # Regardless of build or run, and whether this is a recommended package or not,
                    # it can only depend on 'r-base' since anything else can and will cause cycles
                    # in the dependency graph. The cran metadata lists all dependencies anyway, even
                    # those packages that are in the recommended group.
                    r_name = 'r-base'
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version
                    deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_name))
                else:
                    conda_name = 'r-' + name.lower()

                    if dep_dict[name]:
                        deps.append('{indent}{name} {version}'.format(name=conda_name,
                            version=dep_dict[name], indent=INDENT))
                    else:
                        deps.append('{indent}{name}'.format(name=conda_name,
                            indent=INDENT))
                    if recursive:
                        if not exists(join(output_dir, conda_name)):
                            packages.append(name)

            if cran_package.get("NeedsCompilation", 'no') == 'yes':
                if dep_type == 'build':
                    deps.append('{indent}posix                # [win]'.format(indent=INDENT))
                    deps.append('{indent}{{{{native}}}}toolchain  # [win]'.format(indent=INDENT))
                    deps.append('{indent}gcc                  # [not win]'.format(indent=INDENT))
                elif dep_type == 'run':
                    deps.append('{indent}{{{{native}}}}gcc-libs   # [win]'.format(indent=INDENT))
                    deps.append('{indent}libgcc               # [not win]'.format(indent=INDENT))
            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        name = d['packagename']

        # Normalize the metadata values
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}

        makedirs(join(output_dir, name))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, name, 'meta.yaml'), 'w') as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        with open(join(output_dir, name, 'build.sh'), 'w') as f:
            f.write(CRAN_BUILD_SH.format(**d))
        with open(join(output_dir, name, 'bld.bat'), 'w') as f:
            f.write(CRAN_BLD_BAT.format(**d))

    print("Done")
Example #40
def skeletonize(in_packages, output_dir=".", output_suffix="", add_maintainer=None, version=None,
                git_tag=None, cran_url=None, recursive=False, archive=True,
                version_compare=False, update_policy='', r_interp='r-base', use_binaries_ver=None,
                use_noarch_generic=False, use_rtools_win=False, config=None,
                variant_config_files=None):

    output_dir = realpath(output_dir)
    config = get_or_merge_config(config, variant_config_files=variant_config_files)

    if not cran_url:
        with TemporaryDirectory() as t:
            _variant = get_package_variants(t, config)[0]
        cran_url = ensure_list(_variant.get('cran_mirror', DEFAULT_VARIANTS['cran_mirror']))[0]

    if len(in_packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    package_list = []

    cran_url = cran_url.rstrip('/')
    cran_metadata = get_cran_metadata(cran_url, output_dir)

    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)

    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix, git_tag, package)
        if inputs_dict:
            package_dicts.update({inputs_dict['pkg-name']: {'inputs': inputs_dict}})

    for package_name in package_dicts:
        package_list.append(package_name)

    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        is_tarfile = location and isfile(location) and tarfile.is_tarfile(location)
        url = inputs['location']

        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))

        # Bodges GitHub packages into cran_metadata
        if is_github_url or is_tarfile:
            rm_rf(config.work_dir)
            if is_github_url:
                m = metadata.MetaData.fromdict({'source': {'git_url': location}}, config=config)
                source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
                new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
                p = subprocess.Popen(['git', 'checkout', new_git_tag], stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, cwd=config.work_dir)
                stdout, stderr = p.communicate()
                stdout = stdout.decode('utf-8')
                stderr = stderr.decode('utf-8')
                if p.returncode:
                    sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                             (new_git_tag, stderr.strip()))
                if stdout:
                    print(stdout, file=sys.stdout)
                if stderr:
                    print(stderr, file=sys.stderr)
            else:
                m = metadata.MetaData.fromdict({'source': {'url': location}}, config=config)
                source.unpack(m.get_section('source'), m.config.work_dir, m.config.src_cache,
                              output_dir, m.config.work_dir)
            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, location.split('/')[-1], "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)"
                                 % (location, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name

        if pkg_name not in cran_metadata:
            sys.exit("Package %s not found" % pkg_name)

        # Make sure package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url and not is_tarfile:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url,
            package, session))

        cran_package = cran_metadata[package.lower()]

        package_dicts[package.lower()].update(
            {
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'patches': '',
                'build_number': 0,
                'build_depends': '',
                'host_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })
        d = package_dicts[package.lower()]
        d['binary1'] = ''
        d['binary2'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot contain '-'; conda's verlib treats '_' as '.'.
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers', add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(indent=INDENT,
                                                               add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not extra_recipe_maintainers:
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array({}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if extra_recipe_maintainers:
            # list[1:].sort() sorts a throwaway copy; assign the sorted slice back instead.
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n  ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number

        cached_path = None
        cran_layout = {'source': {'selector': '{others}',
                                  'dir': 'src/contrib/',
                                  'ext': '.tar.gz',
                                  # If we had platform filters we would change this to:
                                  # build_for_linux or is_github_url or is_tarfile
                                  'use_this': True},
                       'win-64': {'selector': 'win64',
                                  'dir': 'bin/windows/contrib/{}/'.format(use_binaries_ver),
                                  'ext': '.zip',
                                  'use_this': bool(use_binaries_ver)},
                       'osx-64': {'selector': 'osx',
                                  'dir': 'bin/macosx/el-capitan/contrib/{}/'.format(
                                      use_binaries_ver),
                                  'ext': '.tgz',
                                  'use_this': bool(use_binaries_ver)}}
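        # Illustrative: use_binaries_ver='3.6' points win-64 at
        # bin/windows/contrib/3.6/ and osx-64 at bin/macosx/el-capitan/contrib/3.6/;
        # when it is None, only the source layout is used.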
        available = {}
        for archive_type, archive_details in iteritems(cran_layout):
            contrib_url = ''
            if archive_details['use_this']:
                if is_tarfile:
                    filename = basename(location)
                    contrib_url = relpath(location, dir_path)
                    contrib_url_rendered = package_url = contrib_url
                    sha256 = hashlib.sha256()
                    cached_path = location
                elif not is_github_url:
                    filename_rendered = '{}_{}{}'.format(
                        package, d['cran_version'], archive_details['ext'])
                    filename = '{}_{{{{ version }}}}'.format(package) + archive_details['ext']
                    contrib_url = '{{{{ cran_mirror }}}}/{}'.format(archive_details['dir'])
                    contrib_url_rendered = cran_url + '/{}'.format(archive_details['dir'])
                    package_url = contrib_url_rendered + filename_rendered
                    sha256 = hashlib.sha256()
                    print("Downloading {} from {}".format(archive_type, package_url))
                    # We may need to inspect the file later to determine which compilers are needed.
                    cached_path, _ = source.download_to_cache(
                        config.src_cache, '',
                        {'url': package_url, 'fn': archive_type + '-' + filename_rendered})
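                    # Illustrative: for package 'jsonlite' at CRAN version 1.6,
                    # filename is 'jsonlite_{{ version }}.tar.gz' (a Jinja template
                    # kept in the recipe) while filename_rendered is
                    # 'jsonlite_1.6.tar.gz' (used for the actual download).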
                available_details = {}
                available_details['selector'] = archive_details['selector']
                if cached_path:
                    with open(cached_path, 'rb') as hashed_file:
                        sha256.update(hashed_file.read())
                    available_details['filename'] = filename
                    available_details['contrib_url'] = contrib_url
                    available_details['contrib_url_rendered'] = contrib_url_rendered
                    available_details['cranurl'] = package_url
                    available_details['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())
                    available_details['cached_path'] = cached_path
                # This is rubbish; d[] should be renamed global[] and should be
                #      merged into source and binaryN.
                if archive_type == 'source':
                    if is_github_url:
                        available_details['url_key'] = ''
                        available_details['fn_key'] = ''
                        available_details['git_url_key'] = 'git_url:'
                        available_details['git_tag_key'] = 'git_tag:'
                        hash_msg = '# You can add a hash for the file here, (md5, sha1 or sha256)'
                        available_details['hash_entry'] = hash_msg
                        available_details['filename'] = ''
                        available_details['cranurl'] = ''
                        available_details['git_url'] = url
                        available_details['git_tag'] = new_git_tag
                        available_details['archive_keys'] = ''
                    else:
                        available_details['url_key'] = 'url:'
                        available_details['fn_key'] = 'fn:'
                        available_details['git_url_key'] = ''
                        available_details['git_tag_key'] = ''
                        available_details['cranurl'] = ' ' + contrib_url + filename
                        available_details['git_url'] = ''
                        available_details['git_tag'] = ''
                available_details['patches'] = d['patches']
                available[archive_type] = available_details

        # Figure out the selectors according to what is available.
        _all = ['linux', 'win32', 'win64', 'osx']
        from_source = _all[:]
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type != 'source':
                sel = archive_details['selector']
                from_source.remove(sel)
                binary_id += 1
            else:
                for k, v in iteritems(archive_details):
                    d[k] = v
        if from_source == _all:
            sel_src = ""
            sel_src_and_win = '  # [win]'
            sel_src_not_win = '  # [not win]'
        else:
            sel_src = '  # [' + ' or '.join(from_source) + ']'
            sel_src_and_win = '  # [' + ' or '.join(fs for fs in from_source if
                                                    fs.startswith('win')) + ']'
            sel_src_not_win = '  # [' + ' or '.join(fs for fs in from_source if not
                                                    fs.startswith('win')) + ']'

        d['sel_src'] = sel_src
        d['sel_src_and_win'] = sel_src_and_win
        d['sel_src_not_win'] = sel_src_not_win

        if 'source' in available:
            available_details = available['source']
            available_details['sel'] = sel_src
            filename = available_details['filename']
            if 'contrib_url' in available_details:
                contrib_url = available_details['contrib_url']
                if archive:
                    if is_tarfile:
                        available_details['cranurl'] = (INDENT + contrib_url)
                    else:
                        available_details['cranurl'] = (INDENT + contrib_url +
                            filename + sel_src + INDENT + contrib_url +
                            'Archive/{}/'.format(package) + filename + sel_src)
                else:
                    available_details['cranurl'] = ' ' + contrib_url + filename + sel_src
            if not is_github_url:
                available_details['archive_keys'] = '{fn_key} {filename} {sel}\n' \
                                                    '  {url_key}{sel}' \
                                                    '    {cranurl}\n' \
                                                    '  {hash_entry}{sel}'.format(
                    **available_details)

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
            cran_package['orig_lines'] if l])

        # Render the source and binaryN keys
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type == 'source':
                d['source'] = SOURCE_META.format(**archive_details)
            else:
                archive_details['sel'] = '  # [' + archive_details['selector'] + ']'
                d['binary' + str(binary_id)] = BINARY_META.format(**archive_details)
                binary_id += 1

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # use CRAN page as homepage if nothing has been specified
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if not use_noarch_generic or cran_package.get("NeedsCompilation", 'no') == 'yes':
            d['noarch_generic'] = ''
        else:
            d['noarch_generic'] = 'noarch: generic'

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends',
            '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports',
            '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo",
            '').split(',') if s.strip()]

        dep_dict = {}

        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                    (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(available['source']['cached_path']) as tf:
                need_f = any(f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf)
                # Fortran builds use CC to perform the link (they do not call the linker directly).
                need_c = need_f or any(f.name.lower().endswith('.c') for f in tf)
                need_cxx = any(f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                               for f in tf)
                need_autotools = any(f.name.lower().endswith('/configure') for f in tf)
                need_make = (any((need_autotools, need_f, need_cxx, need_c)) or
                             any(f.name.lower().endswith(('/makefile', '/makevars'))
                                 for f in tf))
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False

        if 'Rcpp' in dep_dict or 'RcppArmadillo' in dep_dict:
            need_cxx = True

        if need_cxx:
            need_c = True

        for dep_type in ['build', 'host', 'run']:

            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append("{indent}{{{{ compiler('c') }}}}      {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_cxx:
                    deps.append("{indent}{{{{ compiler('cxx') }}}}    {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_f:
                    deps.append("{indent}{{{{ compiler('fortran') }}}}{sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if use_rtools_win:
                    need_c = need_cxx = need_f = need_autotools = need_make = False
                    deps.append("{indent}{{{{native}}}}rtools      {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{native}}}}extsoft     {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}toolchain      {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_autotools or need_make or need_git:
                    deps.append("{indent}{{{{posix}}}}filesystem      {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_git:
                    deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append("{indent}{{{{posix}}}}sed             {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}grep            {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}autoconf        {sel}".format(
                        indent=INDENT, sel=sel_src))
                    deps.append("{indent}{{{{posix}}}}automake-wrapper{sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}automake        {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
                if need_make:
                    deps.append("{indent}{{{{posix}}}}make            {sel}".format(
                        indent=INDENT, sel=sel_src))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}gcc-libs       {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))

            if dep_type == 'host' or dep_type == 'run':
                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first
                        # Regardless of build or run, and whether this is a
                        # recommended package or not, it can only depend on
                        # r_interp since anything else can and will cause
                        # cycles in the dependency graph. The cran metadata
                        # lists all dependencies anyway, even those packages
                        # that are in the recommended group.
                        # We don't include any R version restrictions because
                        # conda-build always pins r-base and mro-base version.
                        deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_interp))
                    else:
                        conda_name = 'r-' + name.lower()

                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(name=conda_name,
                                version=dep_dict[name], indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(name=conda_name,
                                indent=INDENT))
                        if recursive:
                            lower_name = name.lower()
                            if lower_name not in package_dicts:
                                inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
                                                                     git_tag, lower_name)
                                assert lower_name == inputs_dict['pkg-name'], \
                                    "name %s != inputs_dict['pkg-name'] %s" % (
                                        name, inputs_dict['pkg-name'])
                                assert lower_name not in package_list
                                package_dicts.update({lower_name: {'inputs': inputs_dict}})
                                package_list.append(lower_name)

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s" % dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
        elif update_policy == 'skip-up-to-date' and up_to_date(cran_metadata,
                                                               d['inputs']['old-metadata']):
            continue
        elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
            continue

        # Normalize the metadata values
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}
        try:
            makedirs(dir_path)
        except OSError:
            # The directory may already exist.
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                # 'all' here was the builtin function (always unequal to a list);
                # compare against the platform list instead.
                if from_source == _all:
                    f.write(CRAN_BUILD_SH_SOURCE.format(**d))
                elif from_source == []:
                    f.write(CRAN_BUILD_SH_BINARY.format(**d))
                else:
                    tpbt = [target_platform_bash_test_by_sel[t] for t in from_source]
                    d['source_pf_bash'] = ' || '.join(['[[ $target_platform ' + s + ' ]]'
                                                  for s in tpbt])
                    f.write(CRAN_BUILD_SH_MIXED.format(**d))

        if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                if len([fs for fs in from_source if fs.startswith('win')]) == 2:
                    f.write(CRAN_BLD_BAT_SOURCE.format(**d))
                else:
                    f.write(CRAN_BLD_BAT_MIXED.format(**d))
Example #41
def skeletonize(in_packages,
                output_dir=".",
                output_suffix="",
                add_maintainer=None,
                version=None,
                git_tag=None,
                cran_url="https://cran.r-project.org/",
                recursive=False,
                archive=True,
                version_compare=False,
                update_policy='',
                config=None):

    output_dir = realpath(output_dir)

    if not config:
        config = Config()

    if len(in_packages) > 1 and version_compare:
        raise ValueError(
            "--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    package_list = []

    cran_metadata = get_cran_metadata(cran_url, output_dir)

    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)

    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
                                             git_tag, package)
        if inputs_dict:
            package_dicts.update(
                {inputs_dict['pkg-name']: {'inputs': inputs_dict}})

    for package_name in package_dicts:
        package_list.append(package_name)

    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        url = inputs['location']

        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))

        # Bodges GitHub packages into cran_metadata
        if is_github_url:
            rm_rf(config.work_dir)
            m = metadata.MetaData.fromdict({'source': {
                'git_url': location
            }},
                                           config=config)
            source.git_source(m.get_section('source'), m.config.git_cache,
                              m.config.work_dir)
            new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(['git', 'checkout', new_git_tag],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=config.work_dir)
            stdout, stderr = p.communicate()
            stdout = stdout.decode('utf-8')
            stderr = stderr.decode('utf-8')
            if p.returncode:
                sys.exit(
                    "Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                    (new_git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg',
                                           "DESCRIPTION")
                sub_description_name = join(config.work_dir,
                                            location.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit(
                        "%s does not appear to be a valid R package "
                        "(no DESCRIPTION file in %s, %s)" %
                        (location, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(
                remove_package_line_continuations(
                    description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name

        if pkg_name not in cran_metadata:
            sys.exit("Package %s not found" % pkg_name)

        # Make sure package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(
                get_package_metadata(cran_url, package, session))

        cran_package = cran_metadata[package.lower()]

        package_dicts[package.lower()].update({
            'cran_packagename': package,
            'packagename': 'r-' + package.lower(),
            'patches': '',
            'build_number': 0,
            'build_depends': '',
            'run_depends': '',
            # CRAN doesn't seem to have this metadata :(
            'home_comment': '#',
            'homeurl': '',
            'summary_comment': '#',
            'summary': '',
        })
        d = package_dicts[package.lower()]
        if is_github_url:
            d['url_key'] = ''
            d['fn_key'] = ''
            d['git_url_key'] = 'git_url:'
            d['git_tag_key'] = 'git_tag:'
            d['hash_entry'] = '# You can add a hash for the file here, like md5, sha1 or sha256'
            d['filename'] = ''
            d['cranurl'] = ''
            d['git_url'] = url
            d['git_tag'] = new_git_tag
        else:
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError(
                "Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
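        # e.g. CRAN version '1.0-6' becomes conda version '1.0_6'; since verlib
        # treats '_' as '.', ordering against '1.0.6' is preserved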
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m,
                                                  'extra/recipe-maintainers',
                                                  add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
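                # e.g. an existing recipe at the same conda version with
                # build/number 2 becomes 3 under 'merge-incr-build-num' and
                # keeps 2 under any other merge policy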
        if not len(patches):
            patches.append("# patches:\n")
            patches.append("   # List any patch files here\n")
            patches.append("   # - fix.patch")
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(
                indent=INDENT, add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not len(extra_recipe_maintainers):
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array(
                        {}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if len(extra_recipe_maintainers):
            # sort the maintainer entries in place, keeping the header at index 0
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n  ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number

        cached_path = None
        if not is_github_url:
            filename = '{}_{}.tar.gz'
            contrib_url = cran_url + 'src/contrib/'
            package_url = contrib_url + filename.format(
                package, d['cran_version'])

            # calculate sha256 by downloading source
            sha256 = hashlib.sha256()
            print("Downloading source from {}".format(package_url))
            # We may need to inspect the file later to determine which compilers are needed.
            cached_path, _ = source.download_to_cache(
                config.src_cache, '', dict({'url': package_url}))
            with open(cached_path, 'rb') as sf:
                sha256.update(sf.read())
            d['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())

            d['filename'] = filename.format(package, '{{ version }}')
            if archive:
                d['cranurl'] = (INDENT + contrib_url + d['filename'] + INDENT +
                                contrib_url + 'Archive/{}/'.format(package) +
                                d['filename'])
            else:
                d['cranurl'] = ' ' + cran_url + 'src/contrib/' + d['filename']
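            # e.g. for package 'Matrix' with the default cran_url this yields both
            #   https://cran.r-project.org/src/contrib/Matrix_{{ version }}.tar.gz
            #   https://cran.r-project.org/src/contrib/Archive/Matrix/Matrix_{{ version }}.tar.gz
            # so the recipe still resolves after CRAN moves old releases to Archive/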

        d['cran_metadata'] = '\n'.join(
            ['# %s' % l for l in cran_package['orig_lines'] if l])

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'],
                                                   allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # use CRAN page as homepage if nothing has been specified
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(
                    package)

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [
            s.strip() for s in cran_package.get('Depends', '').split(',')
            if s.strip()
        ]
        imports = [
            s.strip() for s in cran_package.get('Imports', '').split(',')
            if s.strip()
        ]
        links = [
            s.strip() for s in cran_package.get("LinkingTo", '').split(',')
            if s.strip()
        ]

        dep_dict = {}

        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                         (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop,
                                                       version=ver)
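            # e.g. 'Rcpp (>= 0.12-3)' parses to name='Rcpp', relop='>=' and
            # ver='0.12_3', so dep_dict['Rcpp'] == '>=0.12_3'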

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(cached_path) as tf:
                need_f = any([
                    f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf
                ])
                # Fortran builds use CC to perform the link (they do not call the linker directly).
                need_c = True if need_f else \
                    any([f.name.lower().endswith('.c') for f in tf])
                need_cxx = any([
                    f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                    for f in tf
                ])
                need_autotools = any(
                    [f.name.lower().endswith('/configure') for f in tf])
                need_make = True if any((need_autotools, need_f, need_cxx, need_c)) else \
                    any([f.name.lower().endswith(('/makefile', '/makevars'))
                        for f in tf])
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False
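        # Net effect: a tarball containing src/foo.c plus src/Makevars sets
        # need_c and need_make; a pure-R package leaves all flags False and
        # skips the compiler/toolchain dependencies added below.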
        for dep_type in ['build', 'run']:

            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append(
                        "{indent}{{{{ compiler('c') }}}}        # [not win]".
                        format(indent=INDENT))
                if need_cxx:
                    deps.append(
                        "{indent}{{{{ compiler('cxx') }}}}      # [not win]".
                        format(indent=INDENT))
                if need_f:
                    deps.append(
                        "{indent}{{{{ compiler('fortran') }}}}  # [not win]".
                        format(indent=INDENT))
                if need_c or need_cxx or need_f:
                    deps.append(
                        "{indent}{{{{native}}}}toolchain        # [win]".
                        format(indent=INDENT))
                if need_autotools or need_make or need_git:
                    deps.append(
                        "{indent}{{{{posix}}}}filesystem        # [win]".
                        format(indent=INDENT))
                if need_git:
                    deps.append(
                        "{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append(
                        "{indent}{{{{posix}}}}sed               # [win]".
                        format(indent=INDENT))
                    deps.append(
                        "{indent}{{{{posix}}}}grep              # [win]".
                        format(indent=INDENT))
                    deps.append(
                        "{indent}{{{{posix}}}}autoconf".format(indent=INDENT))
                    deps.append(
                        "{indent}{{{{posix}}}}automake".format(indent=INDENT))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(
                        indent=INDENT))
                if need_make:
                    deps.append(
                        "{indent}{{{{posix}}}}make".format(indent=INDENT))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append(
                        "{indent}{{{{native}}}}gcc-libs         # [win]".
                        format(indent=INDENT))

            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == 'R':
                    # Put R first
                    # Regardless of build or run, and whether this is a recommended package or not,
                    # it can only depend on 'r-base' since anything else can and will cause cycles
                    # in the dependency graph. The cran metadata lists all dependencies anyway, even
                    # those packages that are in the recommended group.
                    r_name = 'r-base'
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version
                    deps.insert(
                        0, '{indent}{r_name}'.format(indent=INDENT,
                                                     r_name=r_name))
                else:
                    conda_name = 'r-' + name.lower()

                    if dep_dict[name]:
                        deps.append('{indent}{name} {version}'.format(
                            name=conda_name,
                            version=dep_dict[name],
                            indent=INDENT))
                    else:
                        deps.append('{indent}{name}'.format(name=conda_name,
                                                            indent=INDENT))
                    if recursive:
                        lower_name = name.lower()
                        if lower_name not in package_dicts:
                            inputs_dict = package_to_inputs_dict(
                                output_dir, output_suffix, git_tag, lower_name)
                            assert lower_name == inputs_dict['pkg-name'], \
                                "name %s != inputs_dict['pkg-name'] %s" % (name,
                                                                           inputs_dict['pkg-name'])
                            assert lower_name not in package_list
                            package_dicts.update(
                                {lower_name: {
                                    'inputs': inputs_dict
                                }})
                            package_list.append(lower_name)

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s" %
                                   dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
        elif update_policy == 'skip-up-to-date' and up_to_date(
                cran_metadata, d['inputs']['old-metadata']):
            continue
        elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
            continue

        # Normalize the metadata values
        d = {
            k: unicodedata.normalize("NFKD",
                                     text_type(v)).encode('ascii',
                                                          'ignore').decode()
            for k, v in iteritems(d)
        }
        try:
            makedirs(dir_path)
        except OSError:
            # directory may already exist
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path,
                           'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                f.write(CRAN_BUILD_SH.format(**d))
        if not exists(join(dir_path,
                           'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                f.write(CRAN_BLD_BAT.format(**d))
Example #42
def get_package_metadata(package, d, data, output_dir, python_version, all_extras,
                         recursive, created_recipes, noarch_python, noprompt, packages,
                         extra_specs, config, setup_options):

    print("Downloading %s" % package)

    pkginfo = get_pkginfo(package,
                          filename=d['filename'],
                          pypiurl=d['pypiurl'],
                          md5=d['md5'],
                          python_version=python_version,
                          extra_specs=extra_specs,
                          setup_options=setup_options,
                          config=config)

    setuptools_build = pkginfo.get('setuptools', False)
    setuptools_run = False

    # Look at the entry_points and construct console_script and
    #  gui_scripts entry_points for conda
    entry_points = pkginfo.get('entry_points', [])
    if entry_points:
        if isinstance(entry_points, str):
            # makes sure it is left-shifted
            newstr = "\n".join(x.strip()
                                for x in entry_points.splitlines())
            _config = configparser.ConfigParser()
            entry_points = {}
            try:
                _config.readfp(StringIO(newstr))
            except Exception as err:
                print("WARNING: entry-points not understood: ",
                        err)
                print("The string was", newstr)
                entry_points = pkginfo['entry_points']
            else:
                setuptools_run = True
                for section in _config.sections():
                    if section in ['console_scripts', 'gui_scripts']:
                        value = ['%s=%s' % (option, _config.get(section, option))
                                    for option in _config.options(section)]
                        entry_points[section] = value
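                # e.g. an ini-style string (hypothetical names)
                #   [console_scripts]
                #   mytool = mypkg.cli:main
                # becomes entry_points == {'console_scripts': ['mytool=mypkg.cli:main']}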
        if not isinstance(entry_points, dict):
            print("WARNING: Could not add entry points. They were:")
            print(entry_points)
        else:
            cs = entry_points.get('console_scripts', [])
            gs = entry_points.get('gui_scripts', [])
            if isinstance(cs, string_types):
                cs = [cs]
            if isinstance(gs, string_types):
                gs = [gs]
            # We have *other* kinds of entry-points so we need
            # setuptools at run-time
            if set(entry_points.keys()) - {'console_scripts', 'gui_scripts'}:
                setuptools_build = True
                setuptools_run = True
            # TODO: Use pythonw for gui scripts
            entry_list = (cs + gs)
            if len(cs + gs) != 0:
                d['entry_points'] = INDENT.join([''] + entry_list)
                d['entry_comment'] = ''
                d['build_comment'] = ''
                d['test_commands'] = INDENT.join([''] + make_entry_tests(entry_list))

    requires = get_requirements(package, pkginfo, all_extras=all_extras)

    if requires or setuptools_build or setuptools_run:
        deps = []
        if setuptools_run:
            deps.append('setuptools')
        for deptext in requires:
            if isinstance(deptext, string_types):
                deptext = deptext.splitlines()
            # Every item may be a single requirement
            #  or a multiline requirements string...
            for dep in deptext:
                # ... and may also contain comments...
                dep = dep.split('#')[0].strip()
                if dep:  # ... and empty (or comment only) lines
                    spec = spec_from_line(dep)
                    if spec is None:
                        sys.exit("Error: Could not parse: %s" % dep)
                    deps.append(spec)

        if 'setuptools' in deps:
            setuptools_build = False
            setuptools_run = False
            d['egg_comment'] = ''
            d['build_comment'] = ''
        d['build_depends'] = INDENT.join([''] +
                                            ['setuptools'] * setuptools_build +
                                            deps)
        d['run_depends'] = INDENT.join([''] +
                                        ['setuptools'] * setuptools_run +
                                        deps)

        if recursive:
            for dep in deps:
                dep = dep.split()[0]
                if not exists(join(output_dir, dep)):
                    if dep not in created_recipes:
                        packages.append(dep)

    if noarch_python:
        d['build_comment'] = ''
        d['noarch_python_comment'] = ''

    if 'packagename' not in d:
        d['packagename'] = pkginfo['name'].lower()
    if d['version'] == 'UNKNOWN':
        d['version'] = pkginfo['version']

    if pkginfo.get('packages'):
        deps = set(pkginfo['packages'])
        if d['import_tests']:
            if not d['import_tests'] or d['import_tests'] == 'PLACEHOLDER':
                olddeps = []
            else:
                olddeps = [x for x in d['import_tests'].split()
                        if x != '-']
            deps = set(olddeps) | deps
        d['import_tests'] = INDENT.join(sorted(deps))
        d['import_comment'] = ''

        d['tests_require'] = INDENT.join(sorted([spec_from_line(pkg) for pkg
                                                    in pkginfo['tests_require']]))

    if pkginfo.get('homeurl'):
        d['homeurl'] = pkginfo['homeurl']
    else:
        if data and 'homeurl' in data:
            d['homeurl'] = data['homeurl']
        else:
            d['homeurl'] = "The package home page"
            d['home_comment'] = '#'

    if pkginfo.get('summary'):
        d['summary'] = repr(pkginfo['summary'])
    else:
        if data:
            d['summary'] = repr(data['summary'])
        else:
            d['summary'] = "Summary of the package"
            d['summary_comment'] = '#'
    if d['summary'].startswith("u'") or d['summary'].startswith('u"'):
        d['summary'] = d['summary'][1:]

    license_classifier = "License :: OSI Approved :: "
    if pkginfo.get('classifiers'):
        licenses = [classifier.split(license_classifier, 1)[1] for
            classifier in pkginfo['classifiers'] if classifier.startswith(license_classifier)]
    elif data and 'classifiers' in data:
        licenses = [classifier.split(license_classifier, 1)[1] for classifier in
                data['classifiers'] if classifier.startswith(license_classifier)]
    else:
        licenses = []
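    # e.g. 'License :: OSI Approved :: MIT License' contributes 'MIT License';
    # multiple matching classifiers are joined with ' or ' below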
    if not licenses:
        if pkginfo.get('license'):
            license_name = pkginfo['license']
        elif data and 'license' in data:
            license_name = data['license']
        else:
            license_name = None
        if license_name:
            if noprompt:
                pass
            elif '\n' not in license_name:
                print('Using "%s" for the license' % license_name)
            else:
                # Some projects put the whole license text in this field
                print("This is the license for %s" % package)
                print()
                print(license_name)
                print()
                license_name = input("What license string should I use? ")
        else:
            if noprompt:
                license_name = "UNKNOWN"
            else:
                license_name = input(("No license could be found for %s on " +
                                    "PyPI or in the source. What license should I use? ") %
                                package)
    else:
        license_name = ' or '.join(licenses)
    d['license'] = license_name
    d['license_family'] = guess_license_family(license_name, allowed_license_families)
Example #43
def test_cc():
    fam = guess_license_family('CC0')
    assert fam == 'CC'
Example #44
def test_gpl2():
    licenses = {u'GPL-2', u'GPL-2 | file LICENSE',
                u'GNU General Public License v2 or later (GPLv2+)'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'GPL2'
Example #45
def skeletonize(packages,
                output_dir=".",
                version=None,
                git_tag=None,
                cran_url="https://cran.r-project.org/",
                recursive=False,
                archive=True,
                version_compare=False,
                update_outdated=False,
                config=None):

    if not config:
        config = Config()

    if len(packages) > 1 and version_compare:
        raise ValueError(
            "--version-compare only works with one package at a time")
    if not update_outdated and not packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}

    cran_metadata = get_cran_metadata(cran_url, output_dir)

    if update_outdated:
        packages = get_outdated(output_dir, cran_metadata, packages)
        for pkg in packages:
            rm_rf(join(output_dir[0], 'r-' + pkg))

    while packages:
        package = packages.pop()

        is_github_url = 'github.com' in package
        url = package

        if is_github_url:
            rm_rf(config.work_dir)
            m = metadata.MetaData.fromdict({'source': {
                'git_url': package
            }},
                                           config=config)
            source.git_source(m.get_section('source'), m.config.git_cache,
                              m.config.work_dir)
            git_tag = git_tag[0] if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(['git', 'checkout', git_tag],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=config.work_dir)
            stdout, stderr = p.communicate()
            stdout = stdout.decode('utf-8')
            stderr = stderr.decode('utf-8')
            if p.returncode:
                sys.exit(
                    "Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                    (git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg',
                                           "DESCRIPTION")
                sub_description_name = join(config.work_dir,
                                            package.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit(
                        "%s does not appear to be a valid R package "
                        "(no DESCRIPTION file in %s, %s)" %
                        (package, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(
                remove_package_line_continuations(
                    description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d

        if package.startswith('r-'):
            package = package[2:]
        if package.endswith('/'):
            package = package[:-1]
        if package.lower() not in cran_metadata:
            sys.exit("Package %s not found" % package)

        # Make sure package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(
                get_package_metadata(cran_url, package, session))

        dir_path = join(output_dir, 'r-' + package.lower())
        if exists(dir_path) and not version_compare:
            raise RuntimeError("directory already exists: %s" % dir_path)

        cran_package = cran_metadata[package.lower()]

        d = package_dicts.setdefault(
            package,
            {
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'build_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })

        if is_github_url:
            d['url_key'] = ''
            d['fn_key'] = ''
            d['git_url_key'] = 'git_url:'
            d['git_tag_key'] = 'git_tag:'
            d['hash_entry'] = '# You can add a hash for the file here, like md5, sha1 or sha256'
            d['filename'] = ''
            d['cranurl'] = ''
            d['git_url'] = url
            d['git_tag'] = git_tag
        else:
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError(
                "Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        if not is_github_url:
            filename = '{}_{}.tar.gz'
            contrib_url = cran_url + 'src/contrib/'
            package_url = contrib_url + filename.format(
                package, d['cran_version'])

            # calculate sha256 by downloading source
            sha256 = hashlib.sha256()
            print("Downloading source from {}".format(package_url))
            sha256.update(urlopen(package_url).read())
            d['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())

            d['filename'] = filename.format(package, '{{ version }}')
            if archive:
                d['cranurl'] = (INDENT + contrib_url + d['filename'] + INDENT +
                                contrib_url + 'Archive/{}/'.format(package) +
                                d['filename'])
            else:
                d['cranurl'] = ' ' + cran_url + 'src/contrib/' + d['filename']

        d['cran_metadata'] = '\n'.join(
            ['# %s' % l for l in cran_package['orig_lines'] if l])

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'],
                                                   allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # use CRAN page as homepage if nothing has been specified
            d['home_comment'] = ''
            d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(
                package)

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [
            s.strip() for s in cran_package.get('Depends', '').split(',')
            if s.strip()
        ]
        imports = [
            s.strip() for s in cran_package.get('Imports', '').split(',')
            if s.strip()
        ]
        links = [
            s.strip() for s in cran_package.get("LinkingTo", '').split(',')
            if s.strip()
        ]

        dep_dict = {}

        for s in set(chain(depends, imports, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                         (package, s))
            name = match.group('name')
            archs = match.group('archs')
            relop = match.group('relop') or ''
            version = match.group('version') or ''
            version = version.replace('-', '_')
            # If there is a relop there should be a version
            assert not relop or version

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop,
                                                       version=version)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        for dep_type in ['build', 'run']:
            deps = []
            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == 'R':
                    # Put R first
                    # Regardless of build or run, and whether this is a recommended package or not,
                    # it can only depend on 'r-base' since anything else can and will cause cycles
                    # in the dependency graph. The cran metadata lists all dependencies anyway, even
                    # those packages that are in the recommended group.
                    r_name = 'r-base'
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version
                    deps.insert(
                        0, '{indent}{r_name}'.format(indent=INDENT,
                                                     r_name=r_name))
                else:
                    conda_name = 'r-' + name.lower()

                    if dep_dict[name]:
                        deps.append('{indent}{name} {version}'.format(
                            name=conda_name,
                            version=dep_dict[name],
                            indent=INDENT))
                    else:
                        deps.append('{indent}{name}'.format(name=conda_name,
                                                            indent=INDENT))
                    if recursive:
                        if not exists(join(output_dir, conda_name)):
                            packages.append(name)

            if cran_package.get("NeedsCompilation", 'no') == 'yes':
                if dep_type == 'build':
                    deps.append('{indent}posix                # [win]'.format(
                        indent=INDENT))
                    deps.append(
                        '{indent}{{{{native}}}}toolchain  # [win]'.format(
                            indent=INDENT))
                    deps.append(
                        '{indent}gcc                  # [not win]'.format(
                            indent=INDENT))
                elif dep_type == 'run':
                    deps.append(
                        '{indent}{{{{native}}}}gcc-libs   # [win]'.format(
                            indent=INDENT))
                    deps.append(
                        '{indent}libgcc               # [not win]'.format(
                            indent=INDENT))
            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        name = d['packagename']

        # Normalize the metadata values
        d = {
            k: unicodedata.normalize("NFKD",
                                     text_type(v)).encode('ascii',
                                                          'ignore').decode()
            for k, v in iteritems(d)
        }

        makedirs(join(output_dir, name))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, name, 'meta.yaml'), 'w') as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        with open(join(output_dir, name, 'build.sh'), 'w') as f:
            f.write(CRAN_BUILD_SH.format(**d))
        with open(join(output_dir, name, 'bld.bat'), 'w') as f:
            f.write(CRAN_BLD_BAT.format(**d))

    print("Done")
Example #46
def test_mit():
    licenses = {u'MIT License', u'MIT + file LICENSE', u'Old MIT'}
    for cens in licenses:
        fam = guess_license_family(cens)
        assert fam == u'MIT'
Example #48
def get_package_metadata(package, d, data, output_dir, python_version, all_extras,
                         recursive, created_recipes, noarch_python, noprompt, packages,
                         config, setup_options):

    print("Downloading %s" % package)

    pkginfo = get_pkginfo(package,
                          filename=d['filename'],
                          pypiurl=d['pypiurl'],
                          md5=d['md5'],
                          python_version=python_version,
                          setup_options=setup_options,
                          config=config)

    setuptools_build = pkginfo.get('setuptools', False)
    setuptools_run = False

    # Look at the entry_points and construct console_script and
    #  gui_scripts entry_points for conda
    entry_points = pkginfo.get('entry_points', [])
    if entry_points:
        if isinstance(entry_points, str):
            # makes sure it is left-shifted
            newstr = "\n".join(x.strip()
                                for x in entry_points.splitlines())
            _config = configparser.ConfigParser()
            entry_points = {}
            try:
                _config.readfp(StringIO(newstr))
            except Exception as err:
                print("WARNING: entry-points not understood: ",
                        err)
                print("The string was", newstr)
                entry_points = pkginfo['entry_points']
            else:
                setuptools_run = True
                for section in _config.sections():
                    if section in ['console_scripts', 'gui_scripts']:
                        value = ['%s=%s' % (option, _config.get(section, option))
                                    for option in _config.options(section)]
                        entry_points[section] = value
        if not isinstance(entry_points, dict):
            print("WARNING: Could not add entry points. They were:")
            print(entry_points)
        else:
            cs = entry_points.get('console_scripts', [])
            gs = entry_points.get('gui_scripts', [])
            if isinstance(cs, string_types):
                cs = [cs]
            if isinstance(gs, string_types):
                gs = [gs]
            # We have *other* kinds of entry-points so we need
            # setuptools at run-time
            if set(entry_points.keys()) - {'console_scripts', 'gui_scripts'}:
                setuptools_build = True
                setuptools_run = True
            # TODO: Use pythonw for gui scripts
            entry_list = (cs + gs)
            if len(cs + gs) != 0:
                d['entry_points'] = INDENT.join([''] + entry_list)
                d['entry_comment'] = ''
                d['build_comment'] = ''
                d['test_commands'] = INDENT.join([''] + make_entry_tests(entry_list))

    requires = get_requirements(package, pkginfo, all_extras=all_extras)

    if requires or setuptools_build or setuptools_run:
        deps = []
        if setuptools_run:
            deps.append('setuptools')
        for deptext in requires:
            if isinstance(deptext, string_types):
                deptext = deptext.splitlines()
            # Every item may be a single requirement
            #  or a multiline requirements string...
            for dep in deptext:
                # ... and may also contain comments...
                dep = dep.split('#')[0].strip()
                if dep:  # ... and empty (or comment only) lines
                    spec = spec_from_line(dep)
                    if spec is None:
                        sys.exit("Error: Could not parse: %s" % dep)
                    deps.append(spec)

        if 'setuptools' in deps:
            setuptools_build = False
            setuptools_run = False
            d['egg_comment'] = ''
            d['build_comment'] = ''
        d['build_depends'] = INDENT.join([''] +
                                            ['setuptools'] * setuptools_build +
                                            deps)
        d['run_depends'] = INDENT.join([''] +
                                        ['setuptools'] * setuptools_run +
                                        deps)

        if recursive:
            for dep in deps:
                dep = dep.split()[0]
                if not exists(join(output_dir, dep)):
                    if dep not in created_recipes:
                        packages.append(dep)

    if noarch_python:
        d['build_comment'] = ''
        d['noarch_python_comment'] = ''

    if 'packagename' not in d:
        d['packagename'] = pkginfo['name'].lower()
    if d['version'] == 'UNKNOWN':
        d['version'] = pkginfo['version']

    if pkginfo.get('packages'):
        deps = set(pkginfo['packages'])
        if d['import_tests']:
            if not d['import_tests'] or d['import_tests'] == 'PLACEHOLDER':
                olddeps = []
            else:
                olddeps = [x for x in d['import_tests'].split()
                        if x != '-']
            deps = set(olddeps) | deps
        d['import_tests'] = INDENT.join(sorted(deps))
        d['import_comment'] = ''

        d['tests_require'] = INDENT.join(sorted([spec_from_line(pkg) for pkg
                                                    in pkginfo['tests_require']]))

    if pkginfo.get('homeurl'):
        d['homeurl'] = pkginfo['homeurl']
    else:
        if data and 'homeurl' in data:
            d['homeurl'] = data['homeurl']
        else:
            d['homeurl'] = "The package home page"
            d['home_comment'] = '#'

    if pkginfo.get('summary'):
        d['summary'] = repr(pkginfo['summary'])
    else:
        if data:
            d['summary'] = repr(data['summary'])
        else:
            d['summary'] = "Summary of the package"
            d['summary_comment'] = '#'
    if d['summary'].startswith("u'") or d['summary'].startswith('u"'):
        d['summary'] = d['summary'][1:]

    license_classifier = "License :: OSI Approved :: "
    if pkginfo.get('classifiers'):
        licenses = [classifier.split(license_classifier, 1)[1] for
            classifier in pkginfo['classifiers'] if classifier.startswith(license_classifier)]
    elif data and 'classifiers' in data:
        licenses = [classifier.split(license_classifier, 1)[1] for classifier in
                data['classifiers'] if classifier.startswith(license_classifier)]
    else:
        licenses = []
    if not licenses:
        if pkginfo.get('license'):
            license_name = pkginfo['license']
        elif data and 'license' in data:
            license_name = data['license']
        else:
            license_name = None
        if license_name:
            if noprompt:
                pass
            elif '\n' not in license_name:
                print('Using "%s" for the license' % license_name)
            else:
                # Some projects put the whole license text in this field
                print("This is the license for %s" % package)
                print()
                print(license_name)
                print()
                license_name = input("What license string should I use? ")
        else:
            if noprompt:
                license_name = "UNKNOWN"
            else:
                license_name = input(("No license could be found for %s on " +
                                    "PyPI or in the source. What license should I use? ") %
                                package)
    else:
        license_name = ' or '.join(licenses)
    d['license'] = license_name
    d['license_family'] = guess_license_family(license_name, allowed_license_families)
Example #49
def skeletonize(
    packages,
    output_dir=".",
    version=None,
    git_tag=None,
    cran_url="http://cran.r-project.org/",
    recursive=False,
    archive=True,
    version_compare=False,
    update_outdated=False,
    config=None,
):

    if not config:
        config = Config()

    if len(packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if not update_outdated and not packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}

    cran_metadata = get_cran_metadata(cran_url, output_dir)

    if update_outdated:
        packages = get_outdated(output_dir, cran_metadata, packages)
        for pkg in packages:
            rm_rf(join(output_dir[0], "r-" + pkg))

    while packages:
        package = packages.pop()

        is_github_url = "github.com" in package
        url = package

        if is_github_url:
            rm_rf(config.work_dir)
            source.git_source({"git_url": package}, ".", config=config)
            git_tag = git_tag[0] if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(
                ["git", "checkout", git_tag], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=config.work_dir
            )
            stdout, stderr = p.communicate()
            stdout = stdout.decode("utf-8")
            stderr = stderr.decode("utf-8")
            if p.returncode:
                sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" % (git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, "pkg", "DESCRIPTION")
                sub_description_name = join(config.work_dir, package.split("/")[-1], "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit(
                        "%s does not appear to be a valid R package "
                        "(no DESCRIPTION file in %s, %s)" % (package, sub_description_pkg, sub_description_name)
                    )

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(description_text.splitlines()))
            d["orig_description"] = description_text
            package = d["Package"].lower()
            cran_metadata[package] = d

        if package.startswith("r-"):
            package = package[2:]
        if package.endswith("/"):
            package = package[:-1]
        if package.lower() not in cran_metadata:
            sys.exit("Package %s not found" % package)

        # Make sure package always uses the CRAN capitalization
        package = cran_metadata[package.lower()]["Package"]

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url, package, session))

        dir_path = join(output_dir, "r-" + package.lower())
        if exists(dir_path) and not version_compare:
            raise RuntimeError("directory already exists: %s" % dir_path)

        cran_package = cran_metadata[package.lower()]

        d = package_dicts.setdefault(
            package,
            {
                "cran_packagename": package,
                "packagename": "r-" + package.lower(),
                "build_depends": "",
                "run_depends": "",
                # CRAN doesn't seem to have this metadata :(
                "home_comment": "#",
                "homeurl": "",
                "summary_comment": "#",
                "summary": "",
            },
        )

        if is_github_url:
            d["url_key"] = ""
            d["fn_key"] = ""
            d["git_url_key"] = "git_url:"
            d["git_tag_key"] = "git_tag:"
            d["filename"] = ""
            d["cranurl"] = ""
            d["git_url"] = url
            d["git_tag"] = git_tag
        else:
            d["url_key"] = "url:"
            d["fn_key"] = "fn:"
            d["git_url_key"] = ""
            d["git_tag_key"] = ""
            d["git_url"] = ""
            d["git_tag"] = ""

        if version:
            d["version"] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d["cran_version"] = cran_package["Version"]
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d["conda_version"] = d["cran_version"].replace("-", "_")
        if version_compare:
            sys.exit(not version_compare(dir_path, d["conda_version"]))

        if not is_github_url:
            d["filename"] = "{cran_packagename}_{cran_version}.tar.gz".format(**d)
            if archive:
                d["cranurl"] = (
                    INDENT
                    + cran_url
                    + "src/contrib/"
                    + d["filename"]
                    + INDENT
                    + cran_url
                    + "src/contrib/"
                    + "Archive/"
                    + d["cran_packagename"]
                    + "/"
                    + d["filename"]
                )
            else:
                d["cranurl"] = " " + cran_url + "src/contrib/" + d["filename"]

        d["cran_metadata"] = "\n".join(["# %s" % l for l in cran_package["orig_lines"] if l])

        # XXX: We should maybe normalize these
        d["license"] = cran_package.get("License", "None")
        d["license_family"] = guess_license_family(d["license"], allowed_license_families)

        if "License_is_FOSS" in cran_package:
            d["license"] += " (FOSS)"
        if cran_package.get("License_restricts_use") == "yes":
            d["license"] += " (Restricts use)"

        if "URL" in cran_package:
            d["home_comment"] = ""
            d["homeurl"] = " " + yaml_quote_string(cran_package["URL"])

        if "Description" in cran_package:
            d["summary_comment"] = ""
            d["summary"] = " " + yaml_quote_string(cran_package["Description"])

        if "Suggests" in cran_package:
            d["suggests"] = "# Suggests: %s" % cran_package["Suggests"]
        else:
            d["suggests"] = ""

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get("Depends", "").split(",") if s.strip()]
        imports = [s.strip() for s in cran_package.get("Imports", "").split(",") if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", "").split(",") if s.strip()]

        dep_dict = {}

        for s in set(chain(depends, imports, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" % (package, s))
            name = match.group("name")
            archs = match.group("archs")
            relop = match.group("relop") or ""
            version = match.group("version") or ""
            version = version.replace("-", "_")
            # If there is a relop there should be a version
            assert not relop or version

            if archs:
                sys.exit("Don't know how to handle archs from dependency of " "package %s: %s" % (package, s))

            dep_dict[name] = "{relop}{version}".format(relop=relop, version=version)

        if "R" not in dep_dict:
            dep_dict["R"] = ""

        for dep_type in ["build", "run"]:
            deps = []
            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == "R":
                    # Put R first
                    # Regardless of build or run, and whether this is a recommended package or not,
                    # it can only depend on 'r-base' since anything else can and will cause cycles
                    # in the dependency graph. The cran metadata lists all dependencies anyway, even
                    # those packages that are in the recommended group.
                    r_name = "r-base"
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version
                    deps.insert(0, "{indent}{r_name}".format(indent=INDENT, r_name=r_name))
                else:
                    conda_name = "r-" + name.lower()

                    if dep_dict[name]:
                        deps.append(
                            "{indent}{name} {version}".format(name=conda_name, version=dep_dict[name], indent=INDENT)
                        )
                    else:
                        deps.append("{indent}{name}".format(name=conda_name, indent=INDENT))
                    if recursive:
                        if not exists(join(output_dir, conda_name)):
                            packages.append(name)

            if cran_package.get("NeedsCompilation", "no") == "yes":
                if dep_type == "build":
                    deps.append("{indent}posix               # [win]".format(indent=INDENT))
                    deps.append("{indent}{{{{native}}}}toolchain # [win]".format(indent=INDENT))
                    deps.append("{indent}gcc                 # [not win]".format(indent=INDENT))
            d["%s_depends" % dep_type] = "".join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        name = d["packagename"]

        # Normalize the metadata values
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode("ascii", "ignore").decode() for k, v in iteritems(d)}

        makedirs(join(output_dir, name))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, name, "meta.yaml"), "w") as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        with open(join(output_dir, name, "build.sh"), "w") as f:
            f.write(CRAN_BUILD_SH.format(**d))
        with open(join(output_dir, name, "bld.bat"), "w") as f:
            f.write(CRAN_BLD_BAT.format(**d))

    print("Done")