def find_repo_entry_and_arch(repo_primary, architectures, depend):
    dep_name = depend['name']
    found_package_name = ''
    try:
        # Try direct lookup first.
        found_package = repo_primary[dep_name]
        found_package_name = dep_name
    except KeyError:
        # Fall back to scanning the 'provides' of every package.
        for name, package in iteritems(repo_primary):
            for arch in architectures:
                if arch in package:
                    if 'provides' in package[arch]:
                        for provide in package[arch]['provides']:
                            if provide['name'] == dep_name:
                                print("Found it in {}".format(name))
                                found_package = package
                                found_package_name = name
                                # NOTE: this only exits the provides loop; the
                                # outer loops keep scanning, so a later provider wins.
                                break

    if found_package_name == '':
        print("WARNING: Did not find package called (or another one providing) {}".format(dep_name))  # noqa
        return None, None, None

    chosen_arch = None
    for arch in architectures:
        if arch in found_package:
            chosen_arch = arch
            break
    if not chosen_arch:
        return None, None, None
    entry = found_package[chosen_arch]
    return entry, found_package_name, chosen_arch
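# Hedged usage sketch (not part of the original module): exercises both lookup
# paths of find_repo_entry_and_arch with a toy repo_primary mapping. The dict
# shapes here are assumptions inferred from how the function indexes its arguments.
def _demo_find_repo_entry_and_arch():
    toy_repo_primary = {
        'glibc': {
            'x86_64': {'provides': [{'name': 'libc.so.6'}]},
        },
    }
    # Direct hit: the dependency name is itself a package name.
    entry, name, arch = find_repo_entry_and_arch(
        toy_repo_primary, ['x86_64', 'noarch'], {'name': 'glibc'})
    assert (name, arch) == ('glibc', 'x86_64')
    # Fallback: the dependency is satisfied via another package's 'provides'.
    entry, name, arch = find_repo_entry_and_arch(
        toy_repo_primary, ['x86_64', 'noarch'], {'name': 'libc.so.6'})
    assert (name, arch) == ('glibc', 'x86_64')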
def test_installable(channel='defaults'):
    success = True
    log = get_logger(__name__)
    has_py = re.compile(r'py(\d)(\d)')
    for platform in ['osx-64', 'linux-32', 'linux-64', 'win-32', 'win-64']:
        log.info("######## Testing platform %s ########", platform)
        channels = [channel]
        index = get_index(channel_urls=channels, prepend=False, platform=platform)
        for _, rec in iteritems(index):
            # If we give channels at the command line, only look at
            # packages from those channels (not defaults).
            if channel != 'defaults' and rec.get('schannel', 'defaults') == 'defaults':
                continue
            name = rec['name']
            if name in {'conda', 'conda-build'}:
                # conda can only be installed in the root environment
                continue
            if name.endswith('@'):
                # This is a 'virtual' feature record that conda adds to the
                # index for the solver and should be ignored here.
                continue

            # Don't fail just because the package is a different version of Python
            # than the default. We should probably check depends rather than the
            # build string.
            build = rec['build']
            match = has_py.search(build)
            assert match if 'py' in build else True, build
            if match:
                additional_packages = ['python=%s.%s' % (match.group(1), match.group(2))]
            else:
                additional_packages = []

            version = rec['version']
            log.info('Testing %s=%s', name, version)

            try:
                install_steps = check_install([name + '=' + version] + additional_packages,
                                              channel_urls=channels, prepend=False,
                                              platform=platform)
                success &= bool(install_steps)
            except KeyboardInterrupt:
                raise
            # sys.exit raises an exception that doesn't subclass from Exception
            except BaseException as e:
                success = False
                log.error("FAIL: %s %s on %s with %s (%s)", name, version, platform,
                          additional_packages, e)
    return success
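# Standalone sketch of the build-string parsing used in test_installable: the
# has_py regex extracts the Python version from build strings like 'py36_0' so
# the matching interpreter can be co-installed. The build string is illustrative.
def _demo_has_py():
    import re
    has_py = re.compile(r'py(\d)(\d)')
    match = has_py.search('py36h9f0ad1d_0')
    assert match and match.groups() == ('3', '6')
    print('python=%s.%s' % (match.group(1), match.group(2)))  # python=3.6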
def write_conda_recipe(packages, distro, output_dir, architecture, recursive,
                       override_arch, dependency_add, config):
    cdt_name = distro
    bits = '32' if architecture in ('armv6', 'armv7a', 'i686', 'i386') else '64'
    base_architectures = dict({'i686': 'i386'})
    # gnu_architectures are those recognized by the canonical config.sub / config.guess
    # and crosstool-ng. They are returned from ${CC} -dumpmachine and are a part of the
    # sysroot.
    gnu_architectures = dict({'ppc64le': 'powerpc64le'})
    try:
        base_architecture = base_architectures[architecture]
    except KeyError:
        base_architecture = architecture
    try:
        gnu_architecture = gnu_architectures[architecture]
    except KeyError:
        gnu_architecture = architecture
    architecture_bits = dict({'architecture': architecture,
                              'base_architecture': base_architecture,
                              'gnu_architecture': gnu_architecture,
                              'bits': bits})
    cdt = dict()
    for k, v in iteritems(CDTs[cdt_name]):
        if isinstance(v, string_types):
            cdt[k] = v.format(**architecture_bits)
        else:
            cdt[k] = v

    # Add undeclared dependencies. These can be baked into the global
    # CDTs dict, passed in on the commandline or a mixture of both.
    if 'dependency_add' not in cdt:
        cdt['dependency_add'] = dict()
    if dependency_add:
        for package_and_missed_deps in dependency_add:
            as_list = package_and_missed_deps[0].split(',')
            if as_list[0] in cdt['dependency_add']:
                cdt['dependency_add'][as_list[0]].extend(as_list[1:])
            else:
                cdt['dependency_add'][as_list[0]] = as_list[1:]

    repomd_url = cdt['repomd_url']
    repo_primary = get_repo_dict(repomd_url, "primary", massage_primary,
                                 cdt, config.src_cache)
    for package in packages:
        write_conda_recipes(recursive, repo_primary, package,
                            [architecture, "noarch"], cdt, output_dir,
                            override_arch, config.src_cache)
def _installed(prefix):
    # Map each package name linked into the prefix to its Dist object.
    installed = linked_data(prefix)
    installed = {rec['name']: dist for dist, rec in iteritems(installed)}
    return installed
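# Hedged usage sketch: _installed maps each linked package name in a prefix to
# its Dist object (linked_data comes from conda's API, as used above). The
# prefix path here is hypothetical.
def _demo_installed():
    installed = _installed('/opt/conda/envs/myenv')
    if 'numpy' in installed:
        print(installed['numpy'])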
def skeletonize(packages, output_dir=".", version=None, git_tag=None,
                cran_url="https://cran.r-project.org/", recursive=False,
                archive=True, version_compare=False, update_outdated=False,
                config=None):
    if not config:
        config = Config()

    if len(packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if not update_outdated and not packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    cran_metadata = get_cran_metadata(cran_url, output_dir)

    if update_outdated:
        packages = get_outdated(output_dir, cran_metadata, packages)
        for pkg in packages:
            rm_rf(join(output_dir[0], 'r-' + pkg))

    while packages:
        package = packages.pop()

        is_github_url = 'github.com' in package
        url = package

        if is_github_url:
            rm_rf(config.work_dir)
            m = metadata.MetaData.fromdict({'source': {'git_url': package}}, config=config)
            source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
            git_tag = git_tag[0] if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(['git', 'checkout', git_tag], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, cwd=config.work_dir)
            stdout, stderr = p.communicate()
            stdout = stdout.decode('utf-8')
            stderr = stderr.decode('utf-8')
            if p.returncode:
                sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                         (git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, package.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)" %
                             (package, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d

        if package.startswith('r-'):
            package = package[2:]
        if package.endswith('/'):
            package = package[:-1]
        if package.lower() not in cran_metadata:
            sys.exit("Package %s not found" % package)

        # Make sure the package always uses the CRAN capitalization.
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url,
                                                                       package, session))

        dir_path = join(output_dir, 'r-' + package.lower())
        if exists(dir_path) and not version_compare:
            raise RuntimeError("directory already exists: %s" % dir_path)

        cran_package = cran_metadata[package.lower()]

        d = package_dicts.setdefault(package, {
            'cran_packagename': package,
            'packagename': 'r-' + package.lower(),
            'build_depends': '',
            'run_depends': '',
            # CRAN doesn't seem to have this metadata :(
            'home_comment': '#',
            'homeurl': '',
            'summary_comment': '#',
            'summary': '',
        })

        if is_github_url:
            d['url_key'] = ''
            d['fn_key'] = ''
            d['git_url_key'] = 'git_url:'
            d['git_tag_key'] = 'git_tag:'
            d['hash_entry'] = '# You can add a hash for the file here, like md5, sha1 or sha256'
            d['filename'] = ''
            d['cranurl'] = ''
            d['git_url'] = url
            d['git_tag'] = git_tag
        else:
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        if not is_github_url:
            filename = '{}_{}.tar.gz'
            contrib_url = cran_url + 'src/contrib/'
            package_url = contrib_url + filename.format(package, d['cran_version'])

            # Calculate the sha256 by downloading the source.
            sha256 = hashlib.sha256()
            print("Downloading source from {}".format(package_url))
            sha256.update(urlopen(package_url).read())
            d['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())

            d['filename'] = filename.format(package, '{{ version }}')
            if archive:
                d['cranurl'] = (INDENT + contrib_url + d['filename'] + INDENT +
                                contrib_url + 'Archive/{}/'.format(package) + d['filename'])
            else:
                d['cranurl'] = ' ' + cran_url + 'src/contrib/' + d['filename']

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
                                        cran_package['orig_lines'] if l])

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # Use the CRAN page as the homepage if nothing has been specified.
            d['home_comment'] = ''
            d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends', '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports', '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", '').split(',') if s.strip()]

        dep_dict = {}

        for s in set(chain(depends, imports, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" % (package, s))
            name = match.group('name')
            archs = match.group('archs')
            relop = match.group('relop') or ''
            version = match.group('version') or ''
            version = version.replace('-', '_')
            # If there is a relop there should be a version.
            assert not relop or version

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=version)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

    for dep_type in ['build', 'run']:
            deps = []
            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == 'R':
                    # Put R first. Regardless of build or run, and whether this
                    # is a recommended package or not, it can only depend on
                    # 'r-base' since anything else can and will cause cycles in
                    # the dependency graph. The CRAN metadata lists all
                    # dependencies anyway, even those packages that are in the
                    # recommended group.
                    r_name = 'r-base'
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version.
                    deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_name))
                else:
                    conda_name = 'r-' + name.lower()
                    if dep_dict[name]:
                        deps.append('{indent}{name} {version}'.format(
                            name=conda_name, version=dep_dict[name], indent=INDENT))
                    else:
                        deps.append('{indent}{name}'.format(name=conda_name, indent=INDENT))
                    if recursive:
                        if not exists(join(output_dir, conda_name)):
                            packages.append(name)

            if cran_package.get("NeedsCompilation", 'no') == 'yes':
                if dep_type == 'build':
                    deps.append('{indent}posix # [win]'.format(indent=INDENT))
                    deps.append('{indent}{{{{native}}}}toolchain # [win]'.format(indent=INDENT))
                    deps.append('{indent}gcc # [not win]'.format(indent=INDENT))
                elif dep_type == 'run':
                    deps.append('{indent}{{{{native}}}}gcc-libs # [win]'.format(indent=INDENT))
                    deps.append('{indent}libgcc # [not win]'.format(indent=INDENT))

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        name = d['packagename']

        # Normalize the metadata values.
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}

        makedirs(join(output_dir, name))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, name, 'meta.yaml'), 'w') as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        with open(join(output_dir, name, 'build.sh'), 'w') as f:
            f.write(CRAN_BUILD_SH.format(**d))
        with open(join(output_dir, name, 'bld.bat'), 'w') as f:
            f.write(CRAN_BLD_BAT.format(**d))

    print("Done")
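# Illustration of the CRAN dependency parsing above, using a simplified
# stand-in for VERSION_DEPENDENCY_REGEX (the real pattern, defined elsewhere
# in this module, also handles an archs group): a name plus an optional
# '(relop version)' suffix, with '-' mapped to '_' for conda.
def _demo_parse_cran_dependency():
    import re
    dep_re = re.compile(r'^(?P<name>[\w.]+)\s*'
                        r'(?:\((?P<relop>[<>=!]+)\s*(?P<version>[\d.\-]+)\))?$')
    for s in ['R (>= 3.1.0)', 'methods', 'Rcpp (>= 0.12.3)']:
        m = dep_re.match(s.strip())
        relop = m.group('relop') or ''
        version = (m.group('version') or '').replace('-', '_')
        print(m.group('name'), relop + version)
    # R >=3.1.0
    # methods
    # Rcpp >=0.12.3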
def write_conda_recipe(
    packages,
    distro,
    output_dir,
    architecture,
    recursive,
    override_arch,
    dependency_add,
    config,
    build_number,
    conda_forge_style,
    single_sysroot,
    cdt_info,
):
    cdt_name = distro
    bits = "32" if architecture in ("armv6", "armv7a", "i686", "i386") else "64"
    base_architectures = dict({"i686": "i386"})
    # gnu_architectures are those recognized by the canonical config.sub / config.guess
    # and crosstool-ng. They are returned from ${CC} -dumpmachine and are a part of the
    # sysroot.
    gnu_architectures = dict({"ppc64le": "powerpc64le"})
    try:
        base_architecture = base_architectures[architecture]
    except Exception:
        base_architecture = architecture
    try:
        gnu_architecture = gnu_architectures[architecture]
    except Exception:
        gnu_architecture = architecture
    architecture_bits = dict({
        "architecture": architecture,
        "base_architecture": base_architecture,
        "gnu_architecture": gnu_architecture,
        "bits": bits,
    })
    cdt = dict()
    for k, v in iteritems(cdt_info[cdt_name]):
        if isinstance(v, string_types):
            cdt[k] = v.format(**architecture_bits)
        else:
            cdt[k] = v

    # Add undeclared dependencies. These can be baked into the global
    # CDTs dict, passed in on the commandline or a mixture of both.
    if "dependency_add" not in cdt:
        cdt["dependency_add"] = dict()
    if dependency_add:
        for package_and_missed_deps in dependency_add:
            as_list = package_and_missed_deps[0].split(",")
            if as_list[0] in cdt["dependency_add"]:
                cdt["dependency_add"][as_list[0]].extend(as_list[1:])
            else:
                cdt["dependency_add"][as_list[0]] = as_list[1:]

    repomd_url = cdt["repomd_url"]
    repo_primary = get_repo_dict(repomd_url, "primary", massage_primary,
                                 cdt, config.src_cache)
    for package in packages:
        write_conda_recipes(
            recursive,
            repo_primary,
            package,
            [architecture, "noarch"],
            cdt,
            output_dir,
            override_arch,
            config.src_cache,
            build_number,
            conda_forge_style,
            single_sysroot,
        )
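# Sketch of the --dependency-add plumbing above: each entry is a one-element
# list holding 'package,missing_dep1,...', merged into cdt['dependency_add'].
# The package and dependency names below are illustrative only.
def _demo_dependency_add():
    cdt = {'dependency_add': {}}
    dependency_add = [['util-linux,libselinux,libsepol'], ['util-linux,pam']]
    for package_and_missed_deps in dependency_add:
        as_list = package_and_missed_deps[0].split(',')
        cdt['dependency_add'].setdefault(as_list[0], []).extend(as_list[1:])
    print(cdt['dependency_add'])
    # {'util-linux': ['libselinux', 'libsepol', 'pam']}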
def skeletonize(in_packages, output_dir=".", output_suffix="", add_maintainer=None,
                version=None, git_tag=None, cran_url="https://cran.r-project.org/",
                recursive=False, archive=True, version_compare=False,
                update_policy='', config=None):
    output_dir = realpath(output_dir)

    if not config:
        config = Config()

    if len(in_packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    package_list = []

    cran_metadata = get_cran_metadata(cran_url, output_dir)

    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)

    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix, git_tag, package)
        if inputs_dict:
            package_dicts.update({inputs_dict['pkg-name']: {'inputs': inputs_dict}})

    for package_name, package_dict in package_dicts.items():
        package_list.append(package_name)

    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        url = inputs['location']
        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))

        # Bodges GitHub packages into cran_metadata.
        if is_github_url:
            rm_rf(config.work_dir)
            m = metadata.MetaData.fromdict({'source': {'git_url': location}}, config=config)
            source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
            new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(['git', 'checkout', new_git_tag], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, cwd=config.work_dir)
            stdout, stderr = p.communicate()
            stdout = stdout.decode('utf-8')
            stderr = stderr.decode('utf-8')
            if p.returncode:
                sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                         (new_git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, location.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)" %
                             (location, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name
            if pkg_name not in cran_metadata:
                sys.exit("Package %s not found" % pkg_name)

        # Make sure the package always uses the CRAN capitalization.
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url,
                                                                       package, session))

        cran_package = cran_metadata[package.lower()]

        package_dicts[package.lower()].update({
            'cran_packagename': package,
            'packagename': 'r-' + package.lower(),
            'patches': '',
            'build_number': 0,
            'build_depends': '',
            'run_depends': '',
            # CRAN doesn't seem to have this metadata :(
            'home_comment': '#',
            'homeurl': '',
            'summary_comment': '#',
            'summary': '',
        })
        d = package_dicts[package.lower()]

        if is_github_url:
            d['url_key'] = ''
            d['fn_key'] = ''
            d['git_url_key'] = 'git_url:'
            d['git_tag_key'] = 'git_tag:'
            d['hash_entry'] = '# You can add a hash for the file here, like md5, sha1 or sha256'
            d['filename'] = ''
            d['cranurl'] = ''
            d['git_url'] = url
            d['git_tag'] = new_git_tag
        else:
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers',
                                                  add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
        if not len(patches):
            patches.append("# patches:\n")
            patches.append(" # List any patch files here\n")
            patches.append(" # - fix.patch")
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(
                indent=INDENT, add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not len(extra_recipe_maintainers):
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array({}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if len(extra_recipe_maintainers):
            # Sort everything after the key line (sorting a slice copy would be a no-op).
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number

        cached_path = None
        if not is_github_url:
            filename = '{}_{}.tar.gz'
            contrib_url = cran_url + 'src/contrib/'
            package_url = contrib_url + filename.format(package, d['cran_version'])

            # Calculate the sha256 by downloading the source.
            sha256 = hashlib.sha256()
            print("Downloading source from {}".format(package_url))
            # We may need to inspect the file later to determine which compilers are needed.
            cached_path, _ = source.download_to_cache(config.src_cache, '',
                                                      dict({'url': package_url}))
            sha256.update(open(cached_path, 'rb').read())
            d['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())

            d['filename'] = filename.format(package, '{{ version }}')
            if archive:
                d['cranurl'] = (INDENT + contrib_url + d['filename'] + INDENT +
                                contrib_url + 'Archive/{}/'.format(package) + d['filename'])
            else:
                d['cranurl'] = ' ' + cran_url + 'src/contrib/' + d['filename']

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
                                        cran_package['orig_lines'] if l])

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # Use the CRAN page as the homepage if nothing has been specified.
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends', '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports', '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", '').split(',') if s.strip()]

        dep_dict = {}

        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" % (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version.
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(cached_path) as tf:
                need_f = any([f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf])
                # Fortran builds use CC to perform the link (they do not call the linker directly).
                need_c = True if need_f else \
                    any([f.name.lower().endswith('.c') for f in tf])
                need_cxx = any([f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                                for f in tf])
                need_autotools = any([f.name.lower().endswith('/configure') for f in tf])
                need_make = True if any((need_autotools, need_f, need_cxx, need_c)) else \
                    any([f.name.lower().endswith(('/makefile', '/makevars')) for f in tf])
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False

        for dep_type in ['build', 'run']:
            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append("{indent}{{{{ compiler('c') }}}} # [not win]"
                                .format(indent=INDENT))
                if need_cxx:
                    deps.append("{indent}{{{{ compiler('cxx') }}}} # [not win]"
                                .format(indent=INDENT))
                if need_f:
                    deps.append("{indent}{{{{ compiler('fortran') }}}} # [not win]"
                                .format(indent=INDENT))
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}toolchain # [win]"
                                .format(indent=INDENT))
                if need_autotools or need_make or need_git:
                    deps.append("{indent}{{{{posix}}}}filesystem # [win]"
                                .format(indent=INDENT))
                if need_git:
                    deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append("{indent}{{{{posix}}}}sed # [win]".format(indent=INDENT))
                    deps.append("{indent}{{{{posix}}}}grep # [win]".format(indent=INDENT))
                    deps.append("{indent}{{{{posix}}}}autoconf".format(indent=INDENT))
                    deps.append("{indent}{{{{posix}}}}automake".format(indent=INDENT))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
                if need_make:
                    deps.append("{indent}{{{{posix}}}}make".format(indent=INDENT))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}gcc-libs # [win]"
                                .format(indent=INDENT))

            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == 'R':
                    # Put R first. Regardless of build or run, and whether this
                    # is a recommended package or not, it can only depend on
                    # 'r-base' since anything else can and will cause cycles in
                    # the dependency graph. The CRAN metadata lists all
                    # dependencies anyway, even those packages that are in the
                    # recommended group.
                    r_name = 'r-base'
                    # We don't include any R version restrictions because we
                    # always build R packages against an exact R version.
                    deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_name))
                else:
                    conda_name = 'r-' + name.lower()
                    if dep_dict[name]:
                        deps.append('{indent}{name} {version}'.format(
                            name=conda_name, version=dep_dict[name], indent=INDENT))
                    else:
                        deps.append('{indent}{name}'.format(name=conda_name, indent=INDENT))
                    if recursive:
                        lower_name = name.lower()
                        if lower_name not in package_dicts:
                            inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
                                                                 git_tag, lower_name)
                            assert lower_name == inputs_dict['pkg-name'], \
                                "name %s != inputs_dict['pkg-name'] %s" % (
                                    name, inputs_dict['pkg-name'])
                            assert lower_name not in package_list
                            package_dicts.update({lower_name: {'inputs': inputs_dict}})
                            package_list.append(lower_name)

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s" % dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
            elif update_policy == 'skip-up-to-date' and up_to_date(
                    cran_metadata, d['inputs']['old-metadata']):
                continue
            elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
                continue

        # Normalize the metadata values.
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}
        try:
            makedirs(join(dir_path))
        except OSError:
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_trailing_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                f.write(CRAN_BUILD_SH.format(**d))
        if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                f.write(CRAN_BLD_BAT.format(**d))
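# Standalone sketch of the NeedsCompilation probing used above: scan member
# names in a source tarball to decide which compilers the recipe needs.
# 'example_1.0.tar.gz' is a hypothetical cached source archive.
def _demo_probe_compilers():
    import tarfile
    with tarfile.open('example_1.0.tar.gz') as tf:
        names = [f.name.lower() for f in tf]
    need_f = any(n.endswith(('.f', '.f90', '.f77')) for n in names)
    # Fortran builds link through CC, so Fortran implies C.
    need_c = need_f or any(n.endswith('.c') for n in names)
    need_cxx = any(n.endswith(('.cxx', '.cpp', '.cc', '.c++')) for n in names)
    print(need_c, need_cxx, need_f)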
def skeletonize(in_packages, output_dir=".", output_suffix="", add_maintainer=None,
                version=None, git_tag=None, cran_url=None, recursive=False,
                archive=True, version_compare=False, update_policy='',
                r_interp='r-base', use_binaries_ver=None, use_noarch_generic=False,
                use_rtools_win=False, config=None, variant_config_files=None):
    output_dir = realpath(output_dir)
    config = get_or_merge_config(config, variant_config_files=variant_config_files)

    if not cran_url:
        with TemporaryDirectory() as t:
            _variant = get_package_variants(t, config)[0]
            cran_url = ensure_list(_variant.get('cran_mirror',
                                                DEFAULT_VARIANTS['cran_mirror']))[0]

    if len(in_packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")

    package_dicts = {}
    package_list = []

    cran_url = cran_url.rstrip('/')
    cran_metadata = get_cran_metadata(cran_url, output_dir)

    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)

    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix, git_tag, package)
        if inputs_dict:
            package_dicts.update({inputs_dict['pkg-name']: {'inputs': inputs_dict}})

    for package_name, package_dict in package_dicts.items():
        package_list.append(package_name)

    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        is_tarfile = location and isfile(location) and tarfile.is_tarfile(location)
        url = inputs['location']
        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))

        # Bodges GitHub packages and local tarballs into cran_metadata.
        if is_github_url or is_tarfile:
            rm_rf(config.work_dir)
            if is_github_url:
                m = metadata.MetaData.fromdict({'source': {'git_url': location}},
                                               config=config)
                source.git_source(m.get_section('source'), m.config.git_cache,
                                  m.config.work_dir)
                new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
                p = subprocess.Popen(['git', 'checkout', new_git_tag],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                     cwd=config.work_dir)
                stdout, stderr = p.communicate()
                stdout = stdout.decode('utf-8')
                stderr = stderr.decode('utf-8')
                if p.returncode:
                    sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
                             (new_git_tag, stderr.strip()))
                if stdout:
                    print(stdout, file=sys.stdout)
                if stderr:
                    print(stderr, file=sys.stderr)
            else:
                m = metadata.MetaData.fromdict({'source': {'url': location}}, config=config)
                source.unpack(m.get_section('source'), m.config.work_dir,
                              m.config.src_cache, output_dir, m.config.work_dir)

            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
                sub_description_name = join(config.work_dir, location.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)" %
                             (location, sub_description_pkg, sub_description_name))

            with open(DESCRIPTION) as f:
                description_text = clear_whitespace(f.read())

            d = dict_from_cran_lines(remove_package_line_continuations(
                description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name
            if pkg_name not in cran_metadata:
                sys.exit("Package %s not found" % pkg_name)

        # Make sure the package always uses the CRAN capitalization.
        package = cran_metadata[package.lower()]['Package']

        if not is_github_url and not is_tarfile:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(get_package_metadata(cran_url,
                                                                       package, session))

        cran_package = cran_metadata[package.lower()]

        package_dicts[package.lower()].update({
            'cran_packagename': package,
            'packagename': 'r-' + package.lower(),
            'patches': '',
            'build_number': 0,
            'build_depends': '',
            'host_depends': '',
            'run_depends': '',
            # CRAN doesn't seem to have this metadata :(
            'home_comment': '#',
            'homeurl': '',
            'summary_comment': '#',
            'summary': '',
        })
        d = package_dicts[package.lower()]
        d['binary1'] = ''
        d['binary2'] = ''

        if version:
            d['version'] = version
            raise NotImplementedError("Package versions from CRAN are not yet implemented")

        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have -. Conda (verlib) will treat _ as a .
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))

        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers',
                                                  add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(
                indent=INDENT, add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not len(extra_recipe_maintainers):
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array({}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if len(extra_recipe_maintainers):
            # Sort everything after the key line (sorting a slice copy would be a no-op).
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number

        cached_path = None
        cran_layout = {
            'source': {'selector': '{others}',
                       'dir': 'src/contrib/',
                       'ext': '.tar.gz',
                       # If we had platform filters we would change this to:
                       # build_for_linux or is_github_url or is_tarfile
                       'use_this': True},
            'win-64': {'selector': 'win64',
                       'dir': 'bin/windows/contrib/{}/'.format(use_binaries_ver),
                       'ext': '.zip',
                       'use_this': True if use_binaries_ver else False},
            'osx-64': {'selector': 'osx',
                       'dir': 'bin/macosx/el-capitan/contrib/{}/'.format(use_binaries_ver),
                       'ext': '.tgz',
                       'use_this': True if use_binaries_ver else False}}
        available = {}
        for archive_type, archive_details in iteritems(cran_layout):
            contrib_url = ''
            if archive_details['use_this']:
                if is_tarfile:
                    filename = basename(location)
                    contrib_url = relpath(location, dir_path)
                    contrib_url_rendered = package_url = contrib_url
                    sha256 = hashlib.sha256()
                    cached_path = location
                elif not is_github_url:
                    filename_rendered = '{}_{}{}'.format(
                        package, d['cran_version'], archive_details['ext'])
                    filename = '{}_{{{{ version }}}}'.format(package) + archive_details['ext']
                    contrib_url = '{{{{ cran_mirror }}}}/{}'.format(archive_details['dir'])
                    contrib_url_rendered = cran_url + '/{}'.format(archive_details['dir'])
                    package_url = contrib_url_rendered + filename_rendered
                    sha256 = hashlib.sha256()
                    print("Downloading {} from {}".format(archive_type, package_url))
                    # We may need to inspect the file later to determine which
                    # compilers are needed.
                    cached_path, _ = source.download_to_cache(
                        config.src_cache, '',
                        {'url': package_url, 'fn': archive_type + '-' + filename_rendered})
                available_details = {}
                available_details['selector'] = archive_details['selector']
                if cached_path:
                    sha256.update(open(cached_path, 'rb').read())
                    available_details['filename'] = filename
                    available_details['contrib_url'] = contrib_url
                    available_details['contrib_url_rendered'] = contrib_url_rendered
                    available_details['cranurl'] = package_url
                    available_details['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())
                    available_details['cached_path'] = cached_path
                # This is rubbish; d[] should be renamed global[] and should be
                # merged into source and binaryN.
                if archive_type == 'source':
                    if is_github_url:
                        available_details['url_key'] = ''
                        available_details['fn_key'] = ''
                        available_details['git_url_key'] = 'git_url:'
                        available_details['git_tag_key'] = 'git_tag:'
                        hash_msg = '# You can add a hash for the file here, (md5, sha1 or sha256)'
                        available_details['hash_entry'] = hash_msg
                        available_details['filename'] = ''
                        available_details['cranurl'] = ''
                        available_details['git_url'] = url
                        available_details['git_tag'] = new_git_tag
                        available_details['archive_keys'] = ''
                    else:
                        available_details['url_key'] = 'url:'
                        available_details['fn_key'] = 'fn:'
                        available_details['git_url_key'] = ''
                        available_details['git_tag_key'] = ''
                        available_details['cranurl'] = ' ' + contrib_url + filename
                        available_details['git_url'] = ''
                        available_details['git_tag'] = ''
                available_details['patches'] = d['patches']
                available[archive_type] = available_details

        # Figure out the selectors according to what is available.
        _all = ['linux', 'win32', 'win64', 'osx']
        from_source = _all[:]
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type != 'source':
                sel = archive_details['selector']
                from_source.remove(sel)
                binary_id += 1
            else:
                for k, v in iteritems(archive_details):
                    d[k] = v
        if from_source == _all:
            sel_src = ""
            sel_src_and_win = ' # [win]'
            sel_src_not_win = ' # [not win]'
        else:
            sel_src = ' # [' + ' or '.join(from_source) + ']'
            sel_src_and_win = ' # [' + ' or '.join(fs for fs in from_source
                                                   if fs.startswith('win')) + ']'
            sel_src_not_win = ' # [' + ' or '.join(fs for fs in from_source
                                                   if not fs.startswith('win')) + ']'

        d['sel_src'] = sel_src
        d['sel_src_and_win'] = sel_src_and_win
        d['sel_src_not_win'] = sel_src_not_win

        if 'source' in available:
            available_details = available['source']
            available_details['sel'] = sel_src
            filename = available_details['filename']
            if 'contrib_url' in available_details:
                contrib_url = available_details['contrib_url']
                if archive:
                    if is_tarfile:
                        available_details['cranurl'] = (INDENT + contrib_url)
                    else:
                        available_details['cranurl'] = (INDENT + contrib_url +
                                                        filename + sel_src + INDENT +
                                                        contrib_url +
                                                        'Archive/{}/'.format(package) +
                                                        filename + sel_src)
                else:
                    available_details['cranurl'] = ' ' + contrib_url + filename + sel_src
            if not is_github_url:
                available_details['archive_keys'] = '{fn_key} {filename} {sel}\n' \
                                                    ' {url_key}{sel}' \
                                                    ' {cranurl}\n' \
                                                    ' {hash_entry}{sel}'.format(
                                                        **available_details)

        d['cran_metadata'] = '\n'.join(['# %s' % l for l in
                                        cran_package['orig_lines'] if l])

        # Render the source and binaryN keys.
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type == 'source':
                d['source'] = SOURCE_META.format(**archive_details)
            else:
                archive_details['sel'] = ' # [' + archive_details['selector'] + ']'
                d['binary' + str(binary_id)] = BINARY_META.format(**archive_details)
                binary_id += 1

        # XXX: We should maybe normalize these
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'], allowed_license_families)

        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'

        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # Use the CRAN page as the homepage if nothing has been specified.
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)

        if not use_noarch_generic or cran_package.get("NeedsCompilation", 'no') == 'yes':
            d['noarch_generic'] = ''
        else:
            d['noarch_generic'] = 'noarch: generic'

        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])

        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''

        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends', '').split(',') if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports', '').split(',') if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", '').split(',') if s.strip()]

        dep_dict = {}

        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" % (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version.
            assert not relop or ver

            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))

            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)

        if 'R' not in dep_dict:
            dep_dict['R'] = ''

        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(available['source']['cached_path']) as tf:
                need_f = any([f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf])
                # Fortran builds use CC to perform the link (they do not call the linker directly).
                need_c = True if need_f else \
                    any([f.name.lower().endswith('.c') for f in tf])
                need_cxx = any([f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                                for f in tf])
                need_autotools = any([f.name.lower().endswith('/configure') for f in tf])
                need_make = True if any((need_autotools, need_f, need_cxx, need_c)) else \
                    any([f.name.lower().endswith(('/makefile', '/makevars')) for f in tf])
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False

        if 'Rcpp' in dep_dict or 'RcppArmadillo' in dep_dict:
            need_cxx = True

        if need_cxx:
            need_c = True

        for dep_type in ['build', 'host', 'run']:
            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append("{indent}{{{{ compiler('c') }}}} {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_cxx:
                    deps.append("{indent}{{{{ compiler('cxx') }}}} {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_f:
                    deps.append("{indent}{{{{ compiler('fortran') }}}}{sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if use_rtools_win:
                    need_c = need_cxx = need_f = need_autotools = need_make = False
                    deps.append("{indent}{{{{native}}}}rtools {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{native}}}}extsoft {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}toolchain {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_autotools or need_make or need_git:
                    deps.append("{indent}{{{{posix}}}}filesystem {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_git:
                    deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append("{indent}{{{{posix}}}}sed {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}grep {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}autoconf {sel}".format(
                        indent=INDENT, sel=sel_src))
                    deps.append("{indent}{{{{posix}}}}automake-wrapper{sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}automake {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
                if need_make:
                    deps.append("{indent}{{{{posix}}}}make {sel}".format(
                        indent=INDENT, sel=sel_src))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}gcc-libs {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))

            if dep_type == 'host' or dep_type == 'run':
                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first. Regardless of build or run, and whether
                        # this is a recommended package or not, it can only
                        # depend on r_interp since anything else can and will
                        # cause cycles in the dependency graph. The CRAN
                        # metadata lists all dependencies anyway, even those
                        # packages that are in the recommended group.
                        # We don't include any R version restrictions because
                        # conda-build always pins the r-base and mro-base version.
                        deps.insert(0, '{indent}{r_name}'.format(indent=INDENT,
                                                                 r_name=r_interp))
                    else:
                        conda_name = 'r-' + name.lower()
                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(
                                name=conda_name, version=dep_dict[name], indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(name=conda_name,
                                                                indent=INDENT))
                        if recursive:
                            lower_name = name.lower()
                            if lower_name not in package_dicts:
                                inputs_dict = package_to_inputs_dict(output_dir,
                                                                     output_suffix,
                                                                     git_tag, lower_name)
                                assert lower_name == inputs_dict['pkg-name'], \
                                    "name %s != inputs_dict['pkg-name'] %s" % (
                                        name, inputs_dict['pkg-name'])
                                assert lower_name not in package_list
                                package_dicts.update({lower_name: {'inputs': inputs_dict}})
                                package_list.append(lower_name)

            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s" % dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
            elif update_policy == 'skip-up-to-date' and up_to_date(
                    cran_metadata, d['inputs']['old-metadata']):
                continue
            elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
                continue

        # Normalize the metadata values.
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
             .decode() for k, v in iteritems(d)}
        try:
            makedirs(join(dir_path))
        except OSError:
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                # Fixed: this compared against the builtin 'all' (always False);
                # the platform list is named _all.
                if from_source == _all:
                    f.write(CRAN_BUILD_SH_SOURCE.format(**d))
                elif from_source == []:
                    f.write(CRAN_BUILD_SH_BINARY.format(**d))
                else:
                    tpbt = [target_platform_bash_test_by_sel[t] for t in from_source]
                    d['source_pf_bash'] = ' || '.join(['[[ $target_platform ' + s + ' ]]'
                                                       for s in tpbt])
                    f.write(CRAN_BUILD_SH_MIXED.format(**d))
        if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                if len([fs for fs in from_source if fs.startswith('win')]) == 2:
                    f.write(CRAN_BLD_BAT_SOURCE.format(**d))
                else:
                    f.write(CRAN_BLD_BAT_MIXED.format(**d))
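# Sketch of the selector arithmetic above: given which platforms still build
# from source, derive the meta.yaml selector comment string. The example
# from_source value is illustrative (win32/win64 served by CRAN binaries).
def _demo_selectors():
    _all = ['linux', 'win32', 'win64', 'osx']
    from_source = ['linux', 'osx']
    if from_source == _all:
        sel_src = ''
    else:
        sel_src = ' # [' + ' or '.join(from_source) + ']'
    print(sel_src)  # -> ' # [linux or osx]'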
def main():
    import glob  # used by both the 'linux' and 'mac' branches below
    is_github_url = False
    this_dir = os.getcwd()
    # Unpack each platform's distribution and merge the R libraries.
    config = Config()
    cran_metadata = {}
    # Some packages are missing on some systems. Need to mark them so they
    # get skipped.
    to_be_packaged = set()
    with TemporaryDirectory() as merged_td:
        for platform, details in sources.items():
            with TemporaryDirectory() as td:
                os.chdir(td)
                libdir = None
                # libarchive cannot handle the .exe, so just skip it (the
                # 'win_no' key never matches, which deliberately disables
                # this branch). This means we cannot figure out which
                # packages are not available for Windows.
                if platform == 'win_no':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    libarchive.extract_file(details['cached_as'])
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                elif platform == 'linux':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    libarchive.extract_file(details['cached_as'])
                    # The library itself is nested inside RPMs within the
                    # downloaded archive.
                    for filename in glob.iglob('**/*.rpm', recursive=True):
                        print(filename)
                        libarchive.extract_file(filename)
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                elif platform == 'mac':
                    details['cached_as'], sha256 = cache_file(
                        config.src_cache, details['url'], details['fn'],
                        details['sha'])
                    os.system("bsdtar -xf {}".format(details['cached_as']))
                    payloads = glob.glob('./**/Payload', recursive=True)
                    print(payloads)
                    for payload in payloads:
                        libarchive.extract_file(payload)
                    libdir = os.path.join(td, details['library'])
                    library = os.listdir(libdir)
                    print(library)
                    details['to_be_packaged'] = set(library) - set(
                        R_BASE_PACKAGE_NAMES)
                if libdir:
                    distutils.dir_util.copy_tree(libdir, merged_td)
        os.chdir(merged_td)
        libdir = merged_td
        # Fudge until we can unpack the Windows installer .exe on Linux.
        sources['win']['to_be_packaged'] = sources['linux']['to_be_packaged']
        # Get the superset of all packages (note we will no longer have the
        # DESCRIPTION?!)
        for platform, details in sources.items():
            if 'to_be_packaged' in details:
                to_be_packaged.update(details['to_be_packaged'])
        package_dicts = {}
        for package in sorted(list(to_be_packaged)):
            p = os.path.join(libdir, package, "DESCRIPTION")
            with open(p) as cran_description:
                description_text = cran_description.read()
            d = dict_from_cran_lines(
                remove_package_line_continuations(
                    description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
            # Make sure package always uses the CRAN capitalization.
            package = cran_metadata[package.lower()]['Package']
            package_dicts[package.lower()] = {}
            package_dicts[package.lower()]['osx'] = (
                package in sources['mac']['to_be_packaged'])
            package_dicts[package.lower()]['win'] = (
                package in sources['win']['to_be_packaged'])
            package_dicts[package.lower()]['linux'] = (
                package in sources['linux']['to_be_packaged'])
        for package in sorted(list(to_be_packaged)):
            cran_package = cran_metadata[package.lower()]
            package_dicts[package.lower()].update({
                'cran_packagename': package,
                'packagename': 'r-' + package.lower(),
                'patches': '',
                'build_number': 0,
                'build_depends': '',
                'run_depends': '',
                # CRAN doesn't seem to have this metadata :(
                'home_comment': '#',
                'homeurl': '',
                'summary_comment': '#',
                'summary': '',
            })
            d = package_dicts[package.lower()]
            d['url_key'] = 'url:'
            d['fn_key'] = 'fn:'
            d['git_url_key'] = ''
            d['git_tag_key'] = ''
            d['git_url'] = ''
            d['git_tag'] = ''
            d['hash_entry'] = ''
            d['build'], d['skip'] = build_and_skip_olw[(
                package_dicts[package.lower()]['osx'],
                package_dicts[package.lower()]['linux'],
                package_dicts[package.lower()]['win'])]
            d['cran_version'] = cran_package['Version']
            # Conda versions cannot have '-'. Conda (verlib) will treat '_'
            # as a '.'.
            d['conda_version'] = d['cran_version'].replace('-', '_')
            patches = []
            script_env = []
            extra_recipe_maintainers = []
            build_number = 0
            if not len(patches):
                patches.append("# patches:\n")
                patches.append(" # List any patch files here\n")
                patches.append(" # - fix.patch")
            if len(extra_recipe_maintainers):
                # Sorting a slice in place is a no-op; sort a copy and write
                # it back, keeping the first element in place.
                extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
                extra_recipe_maintainers.insert(0, "extra:\n ")
            d['build_number'] = build_number
            cached_path = None
            d['cran_metadata'] = '\n'.join(
                ['# %s' % l for l in cran_package['orig_lines'] if l])
            # XXX: We should maybe normalize these.
            d['license'] = cran_package.get("License", "None")
            d['license_family'] = guess_license_family(
                d['license'], allowed_license_families)
            if 'License_is_FOSS' in cran_package:
                d['license'] += ' (FOSS)'
            if cran_package.get('License_restricts_use') == 'yes':
                d['license'] += ' (Restricts use)'
            if "URL" in cran_package:
                d['home_comment'] = ''
                d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
            else:
                # Use the MRAN page as the homepage if nothing has been
                # specified.
                d['home_comment'] = ''
                d['homeurl'] = ' https://mran.microsoft.com/package/{}'.format(
                    package)
            if 'Description' in cran_package:
                d['summary_comment'] = ''
                d['summary'] = ' ' + yaml_quote_string(
                    cran_package['Description'], indent=6)
            if "Suggests" in cran_package:
                d['suggests'] = " # Suggests: %s" % cran_package['Suggests']
            else:
                d['suggests'] = ''
            if package.lower() == 'revoutilsmath':
                d['always_include_files'] = (" always_include_files:\n"
                                             " - lib/R/lib/libRblas.so # [linux]")
            else:
                d['always_include_files'] = ''
            # Every package depends on at least R.
            # I'm not sure what the difference between depends and imports is.
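            # Illustrative sketch of what the parsing below consumes (field
            # values invented): a DESCRIPTION file may declare, e.g.
            #     Depends: R (>= 3.1.0), methods
            #     Imports: Rcpp (>= 0.12-1)
            #     LinkingTo: Rcpp
            # Each field is split on ',' and every entry is matched against
            # VERSION_DEPENDENCY_REGEX (defined earlier in this module), so
            # dep_dict ends up roughly as
            #     {'R': '>=3.1.0', 'methods': '', 'Rcpp': '>=0.12_1'}
            # once '-' in versions has been rewritten to '_'.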
            depends = [
                s.strip() for s in cran_package.get('Depends', '').split(',')
                if s.strip()
            ]
            imports = [
                s.strip() for s in cran_package.get('Imports', '').split(',')
                if s.strip()
            ]
            links = [
                s.strip() for s in cran_package.get("LinkingTo", '').split(',')
                if s.strip()
            ]
            dep_dict = {}
            seen = set()
            for s in list(chain(imports, depends, links)):
                match = VERSION_DEPENDENCY_REGEX.match(s)
                if not match:
                    sys.exit(
                        "Could not parse version from dependency of %s: %s" %
                        (package, s))
                name = match.group('name')
                if name in seen:
                    continue
                seen.add(name)
                archs = match.group('archs')
                relop = match.group('relop') or ''
                ver = match.group('version') or ''
                ver = ver.replace('-', '_')
                # If there is a relop there should be a version.
                assert not relop or ver
                if archs:
                    sys.exit(
                        "Don't know how to handle archs from dependency of "
                        "package %s: %s" % (package, s))
                dep_dict[name] = '{relop}{version}'.format(relop=relop,
                                                           version=ver)
            if 'R' not in dep_dict:
                dep_dict['R'] = ''
            need_git = is_github_url
            # The 'and False' deliberately disables the tarball inspection:
            # cached_path is always None at this point, so there is nothing
            # to open.
            if cran_package.get("NeedsCompilation", 'no') == 'yes' and False:
                with tarfile.open(cached_path) as tf:
                    need_f = any(f.name.lower().endswith(('.f', '.f90', '.f77'))
                                 for f in tf)
                    # Fortran builds use CC to perform the link (they do not
                    # call the linker directly).
                    need_c = True if need_f else \
                        any(f.name.lower().endswith('.c') for f in tf)
                    need_cxx = any(f.name.lower().endswith(
                        ('.cxx', '.cpp', '.cc', '.c++')) for f in tf)
                    need_autotools = any(
                        f.name.lower().endswith('/configure') for f in tf)
                    need_make = True if any(
                        (need_autotools, need_f, need_cxx, need_c)) else \
                        any(f.name.lower().endswith(('/makefile', '/makevars'))
                            for f in tf)
            else:
                need_c = need_cxx = need_f = need_autotools = need_make = False
            for dep_type in ['build', 'run']:
                deps = []
                # Put non-R dependencies first.
                if dep_type == 'build':
                    if need_c:
                        deps.append(
                            "{indent}{{{{ compiler('c') }}}} # [not win]"
                            .format(indent=INDENT))
                    if need_cxx:
                        deps.append(
                            "{indent}{{{{ compiler('cxx') }}}} # [not win]"
                            .format(indent=INDENT))
                    if need_f:
                        deps.append(
                            "{indent}{{{{ compiler('fortran') }}}} # [not win]"
                            .format(indent=INDENT))
                    if need_c or need_cxx or need_f:
                        deps.append(
                            "{indent}{{{{native}}}}toolchain # [win]"
                            .format(indent=INDENT))
                    if need_autotools or need_make or need_git:
                        deps.append(
                            "{indent}{{{{posix}}}}filesystem # [win]"
                            .format(indent=INDENT))
                    if need_git:
                        deps.append(
                            "{indent}{{{{posix}}}}git".format(indent=INDENT))
                    if need_autotools:
                        deps.append(
                            "{indent}{{{{posix}}}}sed # [win]"
                            .format(indent=INDENT))
                        deps.append(
                            "{indent}{{{{posix}}}}grep # [win]"
                            .format(indent=INDENT))
                        deps.append(
                            "{indent}{{{{posix}}}}autoconf".format(indent=INDENT))
                        deps.append(
                            "{indent}{{{{posix}}}}automake".format(indent=INDENT))
                        deps.append(
                            "{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
                    if need_make:
                        deps.append(
                            "{indent}{{{{posix}}}}make".format(indent=INDENT))
                elif dep_type == 'run':
                    if need_c or need_cxx or need_f:
                        deps.append(
                            "{indent}{{{{native}}}}gcc-libs # [win]"
                            .format(indent=INDENT))
                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first.
                        # Regardless of build or run, and whether this is a
                        # recommended package or not, it can only depend on
                        # 'r-base' since anything else can and will cause
                        # cycles in the dependency graph. The CRAN metadata
                        # lists all dependencies anyway, even those packages
                        # that are in the recommended group.
                        # r_name = 'r-base ' + VERSION
                        # We don't include any R version restrictions because
                        # we always build R packages against an exact R
                        # version.
                        # deps.insert(0, '{indent}{r_name}'.format(
                        #     indent=INDENT, r_name=r_name))
                        # Bit of a hack since I added overlinking checking.
                        r_name = 'mro-base ' + VERSION
                        deps.insert(0, '{indent}{r_name}'.format(
                            indent=INDENT, r_name=r_name))
                    else:
                        conda_name = 'r-' + name.lower()
                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(
                                name=conda_name, version=dep_dict[name],
                                indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(
                                name=conda_name, indent=INDENT))
                d['dep_dict'] = dep_dict  # We need this for (1)
                # Make pin_subpackage entries from the deps. Done separately
                # so the above stays the same as conda-build's CRAN skeleton
                # (so it is easy to refactor the CRAN skeleton for reuse here
                # later).
                for i, dep in enumerate(deps):
                    groups = re.match(r'(\n.* - )([\w-]+) ?([>=\w0-9._]+)?',
                                      dep, re.MULTILINE)
                    indent = groups.group(1)
                    name = groups.group(2)
                    pinning = groups.group(3)
                    if pinning:
                        if '>=' in pinning:
                            deps[i] = ("{}{{{{ pin_subpackage('{}', "
                                       "min_pin='{}', max_pin=None) }}}}".format(
                                           indent, name,
                                           pinning.replace('>=', '')))
                        elif name == 'mro-base':
                            # We end up with filenames with 'r34*' in them
                            # unless we specify the version here.
                            # TODO :: Ask @msarahan about this.
                            deps[i] = "{}{} {{{{ version }}}}".format(indent,
                                                                      name)
                        else:
                            deps[i] = ("{}{{{{ pin_subpackage('{}', "
                                       "min_pin='{}', max_pin='{}') }}}}".format(
                                           indent, name, pinning, pinning))
                    else:
                        deps[i] = ("{}{{{{ pin_subpackage('{}', "
                                   "min_pin='x.x.x.x.x.x', "
                                   "max_pin='x.x.x.x.x.x') }}}}".format(indent,
                                                                        name))
                # Add missing conda package dependencies.
                if dep_type == 'run':
                    if d['packagename'] in extra_deps:
                        for extra_dep in extra_deps[d['packagename']]:
                            print("extra_dep is {}".format(extra_dep))
                            deps.append(extra_dep)
                print(deps)
                d['%s_depends' % dep_type] = ''.join(deps)

        template = {
            'version': VERSION,
            'win_url': sources['win']['url'],
            'win_fn': sources['win']['fn'],
            'win_sha': sources['win']['sha'],
            'linux_url': sources['linux']['url'],
            'linux_fn': sources['linux']['fn'],
            'linux_sha': sources['linux']['sha'],
            'mac_url': sources['mac']['url'],
            'mac_fn': sources['mac']['fn'],
            'mac_sha': sources['mac']['sha'],
        }
        with open(os.path.join(this_dir, 'meta.yaml'), 'w') as meta_yaml:
            meta_yaml.write(HEADER.format(**template))
            meta_yaml.write(BASE_PACKAGE)
            for package in package_dicts:
                d = package_dicts[package]
                # Normalize the metadata values.
                d = {k: unicodedata.normalize("NFKD", text_type(v)).encode(
                    'ascii', 'ignore').decode() for k, v in iteritems(d)}
                meta_yaml.write(PACKAGE.format(**d))
            meta_yaml.write(MRO_BASICS_METAPACKAGE)
            meta_subs = []
            for package in package_dicts:
                meta_subs.append(' - {}{}'.format(
                    package_dicts[package]['packagename'],
                    package_dicts[package]['build']))
            meta_yaml.write('\n'.join(sorted(meta_subs)))
        with open(os.path.join(this_dir, 'build.sh'), 'w') as build_sh:
            build_sh.write(BUILD_SH.format(**template))
        with open(os.path.join(this_dir, 'install-mro-base.sh'),
                  'w') as install_mro_base:
            install_mro_base.write(INSTALL_MRO_BASE_HEADER.format(**template))
            for excluded in sorted(to_be_packaged, key=lambda s: s.lower()):
                install_mro_base.write('EXCLUDED_PACKAGES+=(' + excluded + ')\n')
            install_mro_base.write(INSTALL_MRO_BASE_FOOTER.format(**template))
        with open(os.path.join(this_dir, 'install-r-package.sh'),
                  'w') as install_r_package:
            install_r_package.write(INSTALL_R_PACKAGE.format(**template))
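
# Hedged usage sketch: invoking main() (the entry-point wiring below is an
# assumption, not shown elsewhere in this module) writes, into the invoking
# directory:
#   meta.yaml            -- HEADER + BASE_PACKAGE, one PACKAGE stanza per R
#                           package, then the MRO_BASICS_METAPACKAGE with its
#                           sorted list of subpackages
#   build.sh             -- rendered from the BUILD_SH template
#   install-mro-base.sh  -- one 'EXCLUDED_PACKAGES+=(<pkg>)' line per package
#                           between the INSTALL_MRO_BASE_HEADER/FOOTER parts
#   install-r-package.sh -- rendered from the INSTALL_R_PACKAGE template
#
# if __name__ == '__main__':
#     main()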

def valid_distros():
    return ", ".join(name for name, _ in iteritems(CDTs))
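
# Illustrative note (distro names invented): if CDTs were, say,
# {'centos6': {...}, 'centos7': {...}}, valid_distros() would return the
# string "centos6, centos7", suitable for an error message listing the
# distros a caller may choose from.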

def skeletonize(in_packages, output_dir=".", output_suffix="",
                add_maintainer=None, version=None, git_tag=None,
                cran_url="https://cran.r-project.org", recursive=False,
                archive=True, version_compare=False, update_policy='',
                r_interp='r-base', use_binaries_ver=None,
                use_noarch_generic=False, use_rtools_win=False, config=None):
    output_dir = realpath(output_dir)
    if not config:
        config = Config()
    if len(in_packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if update_policy == 'error' and not in_packages:
        raise ValueError("At least one package must be supplied")
    package_dicts = {}
    package_list = []
    cran_url = cran_url.rstrip('/')
    cran_metadata = get_cran_metadata(cran_url, output_dir)
    # r_recipes_in_output_dir = []
    # recipes = listdir(output_dir)
    # for recipe in recipes:
    #     if not recipe.startswith('r-') or not isdir(recipe):
    #         continue
    #     r_recipes_in_output_dir.append(recipe)
    for package in in_packages:
        inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
                                             git_tag, package)
        if inputs_dict:
            package_dicts.update(
                {inputs_dict['pkg-name']: {'inputs': inputs_dict}})
    for package_name, package_dict in package_dicts.items():
        package_list.append(package_name)
    while package_list:
        inputs = package_dicts[package_list.pop()]['inputs']
        location = inputs['location']
        pkg_name = inputs['pkg-name']
        is_github_url = location and 'github.com' in location
        is_tarfile = location and isfile(location) and tarfile.is_tarfile(location)
        url = inputs['location']
        dir_path = inputs['new-location']
        print("Making/refreshing recipe for {}".format(pkg_name))
        # Bodge GitHub packages into cran_metadata.
        if is_github_url or is_tarfile:
            rm_rf(config.work_dir)
            if is_github_url:
                m = metadata.MetaData.fromdict(
                    {'source': {'git_url': location}}, config=config)
                source.git_source(m.get_section('source'), m.config.git_cache,
                                  m.config.work_dir)
                new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
                p = subprocess.Popen(['git', 'checkout', new_git_tag],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     cwd=config.work_dir)
                stdout, stderr = p.communicate()
                stdout = stdout.decode('utf-8')
                stderr = stderr.decode('utf-8')
                if p.returncode:
                    sys.exit("Error: 'git checkout %s' failed (%s).\n"
                             "Invalid tag?"
                             % (new_git_tag, stderr.strip()))
                if stdout:
                    print(stdout, file=sys.stdout)
                if stderr:
                    print(stderr, file=sys.stderr)
            else:
                m = metadata.MetaData.fromdict({'source': {'url': location}},
                                               config=config)
                source.unpack(m.get_section('source'), m.config.work_dir,
                              m.config.src_cache, output_dir,
                              m.config.work_dir)
            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, 'pkg',
                                           "DESCRIPTION")
                sub_description_name = join(config.work_dir,
                                            location.split('/')[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit("%s does not appear to be a valid R package "
                             "(no DESCRIPTION file in %s, %s)"
                             % (location, sub_description_pkg,
                                sub_description_name))
            with open(DESCRIPTION) as f:
                description_text = clear_whitespace(f.read())
            d = dict_from_cran_lines(
                remove_package_line_continuations(
                    description_text.splitlines()))
            d['orig_description'] = description_text
            package = d['Package'].lower()
            cran_metadata[package] = d
        else:
            package = pkg_name
            if pkg_name not in cran_metadata:
                sys.exit("Package %s not found" % pkg_name)
        # Make sure package always uses the CRAN capitalization.
        package = cran_metadata[package.lower()]['Package']
        if not is_github_url and not is_tarfile:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(
                get_package_metadata(cran_url, package, session))
        cran_package = cran_metadata[package.lower()]
        package_dicts[package.lower()].update({
            'cran_packagename': package,
            'packagename': 'r-' + package.lower(),
            'patches': '',
            'build_number': 0,
            'build_depends': '',
            'host_depends': '',
            'run_depends': '',
            # CRAN doesn't seem to have this metadata :(
            'home_comment': '#',
            'homeurl': '',
            'summary_comment': '#',
            'summary': '',
        })
        d = package_dicts[package.lower()]
        d['binary1'] = ''
        d['binary2'] = ''
        if version:
            d['version'] = version
            raise NotImplementedError(
                "Package versions from CRAN are not yet implemented")
        d['cran_version'] = cran_package['Version']
        # Conda versions cannot have '-'. Conda (verlib) will treat '_' as
        # a '.'.
        d['conda_version'] = d['cran_version'].replace('-', '_')
        if version_compare:
            sys.exit(not version_compare(dir_path, d['conda_version']))
        patches = []
        script_env = []
        extra_recipe_maintainers = []
        build_number = 0
        if update_policy.startswith('merge') and inputs['old-metadata']:
            m = inputs['old-metadata']
            patches = make_array(m, 'source/patches')
            script_env = make_array(m, 'build/script_env')
            extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers',
                                                  add_maintainer)
            if m.version() == d['conda_version']:
                build_number = int(m.get_value('build/number', 0))
                build_number += 1 if update_policy == 'merge-incr-build-num' else 0
        if add_maintainer:
            new_maintainer = "{indent}{add_maintainer}".format(
                indent=INDENT, add_maintainer=add_maintainer)
            if new_maintainer not in extra_recipe_maintainers:
                if not len(extra_recipe_maintainers):
                    # We hit this case when there is no existing recipe.
                    extra_recipe_maintainers = make_array(
                        {}, 'extra/recipe-maintainers', True)
                extra_recipe_maintainers.append(new_maintainer)
        if len(extra_recipe_maintainers):
            # Sorting a slice in place is a no-op; sort a copy and write it
            # back, keeping the first element in place.
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
            extra_recipe_maintainers.insert(0, "extra:\n ")
        d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
        d['patches'] = ''.join(patches)
        d['script_env'] = ''.join(script_env)
        d['build_number'] = build_number
        cached_path = None
        cran_layout = {
            'source': {'selector': '{others}',
                       'dir': 'src/contrib/',
                       'ext': '.tar.gz',
                       # If we had platform filters we would change this to:
                       # build_for_linux or is_github_url or is_tarfile
                       'use_this': True},
            'win-64': {'selector': 'win64',
                       'dir': 'bin/windows/contrib/{}/'.format(use_binaries_ver),
                       'ext': '.zip',
                       'use_this': bool(use_binaries_ver)},
            'osx-64': {'selector': 'osx',
                       'dir': 'bin/macosx/el-capitan/contrib/{}/'.format(
                           use_binaries_ver),
                       'ext': '.tgz',
                       'use_this': bool(use_binaries_ver)}}
        available = {}
        for archive_type, archive_details in iteritems(cran_layout):
            contrib_url = ''
            if archive_details['use_this']:
                if is_tarfile:
                    filename = basename(location)
                    contrib_url = relpath(location, dir_path)
                    contrib_url_rendered = package_url = contrib_url
                    sha256 = hashlib.sha256()
                    cached_path = location
                elif not is_github_url:
                    filename = ('{}_{}'.format(package, d['cran_version']) +
                                archive_details['ext'])
                    contrib_url = '{{{{ cran_mirror }}}}/{}'.format(
                        archive_details['dir'])
                    contrib_url_rendered = cran_url + '/{}'.format(
                        archive_details['dir'])
                    package_url = contrib_url_rendered + filename
                    sha256 = hashlib.sha256()
                    print("Downloading {} from {}".format(archive_type,
                                                          package_url))
                    # We may need to inspect the file later to determine
                    # which compilers are needed.
                    cached_path, _ = source.download_to_cache(
                        config.src_cache, '',
                        {'url': package_url,
                         'fn': archive_type + '-' + filename})
                available_details = {}
                available_details['selector'] = archive_details['selector']
                if cached_path:
                    with open(cached_path, 'rb') as cached_file:
                        sha256.update(cached_file.read())
                    available_details['filename'] = filename
                    available_details['contrib_url'] = contrib_url
                    available_details['contrib_url_rendered'] = contrib_url_rendered
                    available_details['package_url'] = package_url
                    available_details['hash_entry'] = 'sha256: {}'.format(
                        sha256.hexdigest())
                    available_details['cached_path'] = cached_path
                # This is rubbish; d[] should be renamed global[] and should
                # be merged into source and binaryN.
                if archive_type == 'source':
                    if is_github_url:
                        available_details['url_key'] = ''
                        available_details['fn_key'] = ''
                        available_details['git_url_key'] = 'git_url:'
                        available_details['git_tag_key'] = 'git_tag:'
                        hash_msg = ('# You can add a hash for the file here '
                                    '(md5, sha1 or sha256)')
                        available_details['hash_entry'] = hash_msg
                        available_details['filename'] = ''
                        available_details['cranurl'] = ''
                        available_details['git_url'] = url
                        available_details['git_tag'] = new_git_tag
                        available_details['archive_keys'] = ''
                    else:
                        available_details['url_key'] = 'url:'
                        available_details['fn_key'] = 'fn:'
                        available_details['git_url_key'] = ''
                        available_details['git_tag_key'] = ''
                        available_details['cranurl'] = ' ' + contrib_url + filename
                        available_details['git_url'] = ''
                        available_details['git_tag'] = ''
                available_details['patches'] = d['patches']
                available[archive_type] = available_details
        # Figure out the selectors according to what is available.
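        # Worked example (hedged): if use_binaries_ver is set, the 'win-64'
        # and 'osx-64' entries above are used, so the loop below removes
        # 'win64' and 'osx' and leaves from_source == ['linux', 'win32'],
        # giving roughly
        #     sel_src         == ' # [linux or win32]'
        #     sel_src_and_win == ' # [win32]'
        #     sel_src_not_win == ' # [linux]'
        # With no binaries at all, from_source stays equal to _all and the
        # plain ' # [win]' / ' # [not win]' selectors are used instead.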
        _all = ['linux', 'win32', 'win64', 'osx']
        from_source = _all[:]
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type != 'source':
                sel = archive_details['selector']
                from_source.remove(sel)
                binary_id += 1
            else:
                for k, v in iteritems(archive_details):
                    d[k] = v
        if from_source == _all:
            sel_src = ""
            sel_src_and_win = ' # [win]'
            sel_src_not_win = ' # [not win]'
        else:
            sel_src = ' # [' + ' or '.join(from_source) + ']'
            sel_src_and_win = ' # [' + ' or '.join(
                fs for fs in from_source if fs.startswith('win')) + ']'
            sel_src_not_win = ' # [' + ' or '.join(
                fs for fs in from_source if not fs.startswith('win')) + ']'
        d['sel_src'] = sel_src
        d['sel_src_and_win'] = sel_src_and_win
        d['sel_src_not_win'] = sel_src_not_win
        if 'source' in available:
            available_details = available['source']
            available_details['sel'] = sel_src
            filename = available_details['filename']
            if 'contrib_url' in available_details:
                contrib_url = available_details['contrib_url']
                if archive:
                    if is_tarfile:
                        available_details['cranurl'] = (INDENT + contrib_url)
                    else:
                        available_details['cranurl'] = (
                            INDENT + contrib_url + filename + sel_src +
                            INDENT + contrib_url +
                            'Archive/{}/'.format(package) + filename + sel_src)
                else:
                    available_details['cranurl'] = (' ' + contrib_url +
                                                    filename + sel_src)
            if not is_github_url:
                available_details['archive_keys'] = (
                    '{fn_key} {filename}{sel}\n'
                    ' {url_key}{sel}'
                    ' {cranurl}\n'
                    ' {hash_entry}{sel}'.format(**available_details))
        d['cran_metadata'] = '\n'.join(
            ['# %s' % l for l in cran_package['orig_lines'] if l])
        # Render the source and binaryN keys.
        binary_id = 1
        for archive_type, archive_details in iteritems(available):
            if archive_type == 'source':
                d['source'] = SOURCE_META.format(**archive_details)
            else:
                archive_details['sel'] = ' # [' + archive_details['selector'] + ']'
                d['binary' + str(binary_id)] = BINARY_META.format(**archive_details)
                binary_id += 1
        # XXX: We should maybe normalize these.
        d['license'] = cran_package.get("License", "None")
        d['license_family'] = guess_license_family(d['license'],
                                                   allowed_license_families)
        if 'License_is_FOSS' in cran_package:
            d['license'] += ' (FOSS)'
        if cran_package.get('License_restricts_use') == 'yes':
            d['license'] += ' (Restricts use)'
        if "URL" in cran_package:
            d['home_comment'] = ''
            d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
        else:
            # Use the CRAN (or GitHub) page as the homepage if nothing has
            # been specified.
            d['home_comment'] = ''
            if is_github_url:
                d['homeurl'] = ' {}'.format(location)
            else:
                d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(
                    package)
        if not use_noarch_generic or cran_package.get("NeedsCompilation",
                                                      'no') == 'yes':
            d['noarch_generic'] = ''
        else:
            d['noarch_generic'] = 'noarch: generic'
        if 'Description' in cran_package:
            d['summary_comment'] = ''
            d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])
        if "Suggests" in cran_package:
            d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
        else:
            d['suggests'] = ''
        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get('Depends', '').split(',')
                   if s.strip()]
        imports = [s.strip() for s in cran_package.get('Imports', '').split(',')
                   if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", '').split(',')
                 if s.strip()]
        dep_dict = {}
        seen = set()
        for s in list(chain(imports, depends, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                         (package, s))
            name = match.group('name')
            if name in seen:
                continue
            seen.add(name)
            archs = match.group('archs')
            relop = match.group('relop') or ''
            ver = match.group('version') or ''
            ver = ver.replace('-', '_')
            # If there is a relop there should be a version.
            assert not relop or ver
            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))
            dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)
        if 'R' not in dep_dict:
            dep_dict['R'] = ''
        need_git = is_github_url
        if cran_package.get("NeedsCompilation", 'no') == 'yes':
            with tarfile.open(available['source']['cached_path']) as tf:
                need_f = any(f.name.lower().endswith(('.f', '.f90', '.f77'))
                             for f in tf)
                # Fortran builds use CC to perform the link (they do not call
                # the linker directly).
                need_c = True if need_f else \
                    any(f.name.lower().endswith('.c') for f in tf)
                need_cxx = any(f.name.lower().endswith(
                    ('.cxx', '.cpp', '.cc', '.c++')) for f in tf)
                need_autotools = any(
                    f.name.lower().endswith('/configure') for f in tf)
                need_make = True if any(
                    (need_autotools, need_f, need_cxx, need_c)) else \
                    any(f.name.lower().endswith(('/makefile', '/makevars'))
                        for f in tf)
        else:
            need_c = need_cxx = need_f = need_autotools = need_make = False
        # Rcpp and RcppArmadillo are C++ libraries, so depending on (or
        # linking to) them implies a C++ (and therefore C) toolchain.
        if 'Rcpp' in dep_dict or 'RcppArmadillo' in dep_dict:
            need_cxx = True
        if need_cxx:
            need_c = True
        for dep_type in ['build', 'host', 'run']:
            deps = []
            # Put non-R dependencies first.
            if dep_type == 'build':
                if need_c:
                    deps.append("{indent}{{{{ compiler('c') }}}} {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_cxx:
                    deps.append("{indent}{{{{ compiler('cxx') }}}} {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if need_f:
                    deps.append("{indent}{{{{ compiler('fortran') }}}} {sel}".format(
                        indent=INDENT, sel=sel_src_not_win))
                if use_rtools_win:
                    need_c = need_cxx = need_f = need_autotools = need_make = False
                    deps.append("{indent}{{{{native}}}}rtools {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{native}}}}extsoft {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}toolchain {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_autotools or need_make or need_git:
                    deps.append("{indent}{{{{posix}}}}filesystem {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                if need_git:
                    deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
                if need_autotools:
                    deps.append("{indent}{{{{posix}}}}sed {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}grep {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}autoconf {sel}".format(
                        indent=INDENT, sel=sel_src))
                    deps.append("{indent}{{{{posix}}}}automake-wrapper {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}automake {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
                    deps.append("{indent}{{{{posix}}}}pkg-config".format(
                        indent=INDENT))
                if need_make:
                    deps.append("{indent}{{{{posix}}}}make {sel}".format(
                        indent=INDENT, sel=sel_src))
            elif dep_type == 'run':
                if need_c or need_cxx or need_f:
                    deps.append("{indent}{{{{native}}}}gcc-libs {sel}".format(
                        indent=INDENT, sel=sel_src_and_win))
            if dep_type == 'host' or dep_type == 'run':
                for name in sorted(dep_dict):
                    if name in R_BASE_PACKAGE_NAMES:
                        continue
                    if name == 'R':
                        # Put R first.
                        # Regardless of build or run, and whether this is a
                        # recommended package or not, it can only depend on
                        # r_interp since anything else can and will cause
                        # cycles in the dependency graph. The CRAN metadata
                        # lists all dependencies anyway, even those packages
                        # that are in the recommended group.
                        # We don't include any R version restrictions because
                        # conda-build always pins the r-base and mro-base
                        # versions.
                        deps.insert(0, '{indent}{r_name}'.format(
                            indent=INDENT, r_name=r_interp))
                    else:
                        conda_name = 'r-' + name.lower()
                        if dep_dict[name]:
                            deps.append('{indent}{name} {version}'.format(
                                name=conda_name, version=dep_dict[name],
                                indent=INDENT))
                        else:
                            deps.append('{indent}{name}'.format(
                                name=conda_name, indent=INDENT))
                        if recursive:
                            lower_name = name.lower()
                            if lower_name not in package_dicts:
                                inputs_dict = package_to_inputs_dict(
                                    output_dir, output_suffix, git_tag,
                                    lower_name)
                                assert lower_name == inputs_dict['pkg-name'], \
                                    "name %s != inputs_dict['pkg-name'] %s" % (
                                        name, inputs_dict['pkg-name'])
                                assert lower_name not in package_list
                                package_dicts.update(
                                    {lower_name: {'inputs': inputs_dict}})
                                package_list.append(lower_name)
            d['%s_depends' % dep_type] = ''.join(deps)

    for package in package_dicts:
        d = package_dicts[package]
        dir_path = d['inputs']['new-location']
        if exists(dir_path) and not version_compare:
            if update_policy == 'error':
                raise RuntimeError("directory already exists "
                                   "(and --update-policy is 'error'): %s"
                                   % dir_path)
            elif update_policy == 'overwrite':
                rm_rf(dir_path)
            elif (update_policy == 'skip-up-to-date' and
                    up_to_date(cran_metadata, d['inputs']['old-metadata'])):
                continue
            elif (update_policy == 'skip-existing' and
                    d['inputs']['old-metadata']):
                continue
        # Normalize the metadata values.
        d = {k: unicodedata.normalize("NFKD", text_type(v)).encode(
            'ascii', 'ignore').decode() for k, v in iteritems(d)}
        try:
            makedirs(join(dir_path))
        except OSError:
            # The directory may already exist.
            pass
        print("Writing recipe for %s" % package.lower())
        with open(join(dir_path, 'meta.yaml'), 'w') as f:
            f.write(clear_whitespace(CRAN_META.format(**d)))
        if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
            with open(join(dir_path, 'build.sh'), 'w') as f:
                if from_source == _all:
                    f.write(CRAN_BUILD_SH_SOURCE.format(**d))
                elif from_source == []:
                    f.write(CRAN_BUILD_SH_BINARY.format(**d))
                else:
                    tpbt = [target_platform_bash_test_by_sel[t]
                            for t in from_source]
                    d['source_pf_bash'] = ' || '.join(
                        '[[ $target_platform ' + s + ' ]]' for s in tpbt)
                    f.write(CRAN_BUILD_SH_MIXED.format(**d))
        if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
            with open(join(dir_path, 'bld.bat'), 'w') as f:
                if len([fs for fs in from_source if fs.startswith('win')]) == 2:
                    f.write(CRAN_BLD_BAT_SOURCE.format(**d))
                else:
                    f.write(CRAN_BLD_BAT_MIXED.format(**d))
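
# Hedged usage sketch (argument values invented; the signature is the one
# defined above). Something like:
#
#   skeletonize(['ggplot2'], output_dir='recipes', recursive=True,
#               update_policy='skip-existing', r_interp='r-base')
#
# would write recipes/r-ggplot2/{meta.yaml,build.sh,bld.bat} and, because
# recursive=True, queue every dependency that does not already have an entry
# in package_dicts for skeletonization as well.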

def skeletonize(
    packages,
    output_dir=".",
    version=None,
    git_tag=None,
    cran_url="http://cran.r-project.org/",
    recursive=False,
    archive=True,
    version_compare=False,
    update_outdated=False,
    config=None,
):
    if not config:
        config = Config()
    if len(packages) > 1 and version_compare:
        raise ValueError("--version-compare only works with one package at a time")
    if not update_outdated and not packages:
        raise ValueError("At least one package must be supplied")
    package_dicts = {}
    cran_metadata = get_cran_metadata(cran_url, output_dir)
    if update_outdated:
        packages = get_outdated(output_dir, cran_metadata, packages)
        for pkg in packages:
            rm_rf(join(output_dir, "r-" + pkg))
    while packages:
        package = packages.pop()
        is_github_url = "github.com" in package
        url = package
        if is_github_url:
            rm_rf(config.work_dir)
            source.git_source({"git_url": package}, ".", config=config)
            git_tag = git_tag[0] if git_tag else get_latest_git_tag(config)
            p = subprocess.Popen(
                ["git", "checkout", git_tag],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=config.work_dir,
            )
            stdout, stderr = p.communicate()
            stdout = stdout.decode("utf-8")
            stderr = stderr.decode("utf-8")
            if p.returncode:
                sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?"
                         % (git_tag, stderr.strip()))
            if stdout:
                print(stdout, file=sys.stdout)
            if stderr:
                print(stderr, file=sys.stderr)
            DESCRIPTION = join(config.work_dir, "DESCRIPTION")
            if not isfile(DESCRIPTION):
                sub_description_pkg = join(config.work_dir, "pkg",
                                           "DESCRIPTION")
                sub_description_name = join(config.work_dir,
                                            package.split("/")[-1],
                                            "DESCRIPTION")
                if isfile(sub_description_pkg):
                    DESCRIPTION = sub_description_pkg
                elif isfile(sub_description_name):
                    DESCRIPTION = sub_description_name
                else:
                    sys.exit(
                        "%s does not appear to be a valid R package "
                        "(no DESCRIPTION file in %s, %s)"
                        % (package, sub_description_pkg, sub_description_name)
                    )
            with open(DESCRIPTION) as f:
                description_text = clear_trailing_whitespace(f.read())
            d = dict_from_cran_lines(
                remove_package_line_continuations(
                    description_text.splitlines()))
            d["orig_description"] = description_text
            package = d["Package"].lower()
            cran_metadata[package] = d
        if package.startswith("r-"):
            package = package[2:]
        if package.endswith("/"):
            package = package[:-1]
        if package.lower() not in cran_metadata:
            sys.exit("Package %s not found" % package)
        # Make sure package always uses the CRAN capitalization.
        package = cran_metadata[package.lower()]["Package"]
        if not is_github_url:
            session = get_session(output_dir)
            cran_metadata[package.lower()].update(
                get_package_metadata(cran_url, package, session))
        dir_path = join(output_dir, "r-" + package.lower())
        if exists(dir_path) and not version_compare:
            raise RuntimeError("directory already exists: %s" % dir_path)
        cran_package = cran_metadata[package.lower()]
        d = package_dicts.setdefault(
            package,
            {
                "cran_packagename": package,
                "packagename": "r-" + package.lower(),
                "build_depends": "",
                "run_depends": "",
                # CRAN doesn't seem to have this metadata :(
                "home_comment": "#",
                "homeurl": "",
                "summary_comment": "#",
                "summary": "",
            },
        )
        if is_github_url:
            d["url_key"] = ""
            d["fn_key"] = ""
            d["git_url_key"] = "git_url:"
            d["git_tag_key"] = "git_tag:"
            d["filename"] = ""
            d["cranurl"] = ""
            d["git_url"] = url
            d["git_tag"] = git_tag
        else:
            d["url_key"] = "url:"
            d["fn_key"] = "fn:"
            d["git_url_key"] = ""
            d["git_tag_key"] = ""
            d["git_url"] = ""
            d["git_tag"] = ""
        if version:
            d["version"] = version
            raise NotImplementedError(
                "Package versions from CRAN are not yet implemented")
        d["cran_version"] = cran_package["Version"]
        # Conda versions cannot have '-'. Conda (verlib) will treat '_' as
        # a '.'.
        d["conda_version"] = d["cran_version"].replace("-", "_")
        if version_compare:
            sys.exit(not version_compare(dir_path, d["conda_version"]))
        if not is_github_url:
            d["filename"] = "{cran_packagename}_{cran_version}.tar.gz".format(**d)
            if archive:
                d["cranurl"] = (
                    INDENT + cran_url + "src/contrib/" + d["filename"] +
                    INDENT + cran_url + "src/contrib/" + "Archive/" +
                    d["cran_packagename"] + "/" + d["filename"]
                )
            else:
                d["cranurl"] = " " + cran_url + "src/contrib/" + d["filename"]
        d["cran_metadata"] = "\n".join(
            ["# %s" % l for l in cran_package["orig_lines"] if l])
        # XXX: We should maybe normalize these.
        d["license"] = cran_package.get("License", "None")
        d["license_family"] = guess_license_family(d["license"],
                                                   allowed_license_families)
        if "License_is_FOSS" in cran_package:
            d["license"] += " (FOSS)"
        if cran_package.get("License_restricts_use") == "yes":
            d["license"] += " (Restricts use)"
        if "URL" in cran_package:
            d["home_comment"] = ""
            d["homeurl"] = " " + yaml_quote_string(cran_package["URL"])
        if "Description" in cran_package:
            d["summary_comment"] = ""
            d["summary"] = " " + yaml_quote_string(cran_package["Description"])
        if "Suggests" in cran_package:
            d["suggests"] = "# Suggests: %s" % cran_package["Suggests"]
        else:
            d["suggests"] = ""
        # Every package depends on at least R.
        # I'm not sure what the difference between depends and imports is.
        depends = [s.strip() for s in cran_package.get("Depends", "").split(",")
                   if s.strip()]
        imports = [s.strip() for s in cran_package.get("Imports", "").split(",")
                   if s.strip()]
        links = [s.strip() for s in cran_package.get("LinkingTo", "").split(",")
                 if s.strip()]
        dep_dict = {}
        for s in set(chain(depends, imports, links)):
            match = VERSION_DEPENDENCY_REGEX.match(s)
            if not match:
                sys.exit("Could not parse version from dependency of %s: %s" %
                         (package, s))
            name = match.group("name")
            archs = match.group("archs")
            relop = match.group("relop") or ""
            version = match.group("version") or ""
            version = version.replace("-", "_")
            # If there is a relop there should be a version.
            assert not relop or version
            if archs:
                sys.exit("Don't know how to handle archs from dependency of "
                         "package %s: %s" % (package, s))
            dep_dict[name] = "{relop}{version}".format(relop=relop,
                                                       version=version)
        if "R" not in dep_dict:
            dep_dict["R"] = ""
        for dep_type in ["build", "run"]:
            deps = []
            for name in sorted(dep_dict):
                if name in R_BASE_PACKAGE_NAMES:
                    continue
                if name == "R":
                    # Put R first.
                    # Regardless of build or run, and whether this is a
                    # recommended package or not, it can only depend on
                    # 'r-base' since anything else can and will cause cycles
                    # in the dependency graph. The CRAN metadata lists all
                    # dependencies anyway, even those packages that are in
                    # the recommended group.
r_name = "r-base" # We don't include any R version restrictions because we # always build R packages against an exact R version deps.insert(0, "{indent}{r_name}".format(indent=INDENT, r_name=r_name)) else: conda_name = "r-" + name.lower() if dep_dict[name]: deps.append( "{indent}{name} {version}".format(name=conda_name, version=dep_dict[name], indent=INDENT) ) else: deps.append("{indent}{name}".format(name=conda_name, indent=INDENT)) if recursive: if not exists(join(output_dir, conda_name)): packages.append(name) if cran_package.get("NeedsCompilation", "no") == "yes": if dep_type == "build": deps.append("{indent}posix # [win]".format(indent=INDENT)) deps.append("{indent}{{{{native}}}}toolchain # [win]".format(indent=INDENT)) deps.append("{indent}gcc # [not win]".format(indent=INDENT)) d["%s_depends" % dep_type] = "".join(deps) for package in package_dicts: d = package_dicts[package] name = d["packagename"] # Normalize the metadata values d = {k: unicodedata.normalize("NFKD", text_type(v)).encode("ascii", "ignore").decode() for k, v in iteritems(d)} makedirs(join(output_dir, name)) print("Writing recipe for %s" % package.lower()) with open(join(output_dir, name, "meta.yaml"), "w") as f: f.write(clear_trailing_whitespace(CRAN_META.format(**d))) with open(join(output_dir, name, "build.sh"), "w") as f: f.write(CRAN_BUILD_SH.format(**d)) with open(join(output_dir, name, "bld.bat"), "w") as f: f.write(CRAN_BLD_BAT.format(**d)) print("Done")