def generate_recipes(app):
    """
    Go through every folder in the `bioconda-recipes/recipes` dir,
    have a README.rst file generated and generate a recipes.rst from
    the collected data.
    """
    renderer = Renderer(app)
    load_config(os.path.join(os.path.dirname(RECIPE_DIR), "config.yml"))
    repodata = RepoData()
    repodata.set_cache(op.join(app.env.doctreedir, 'RepoDataCache.csv'))
    # force loading repodata to avoid duplicate loads from threads
    repodata.df  # pylint: disable=pointless-statement
    recipes: List[Dict[str, Any]] = []
    recipe_dirs = os.listdir(RECIPE_DIR)

    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs, 'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            if not op.isdir(op.join(RECIPE_DIR, folder)):
                logger.error("Item '%s' in recipes folder is not a folder",
                             folder)
                continue
            recipes.extend(generate_readme(folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes: List[Dict[str, Any]] = []
            for folder in chunk:
                if not op.isdir(op.join(RECIPE_DIR, folder)):
                    logger.error("Item '%s' in recipes folder is not a folder",
                                 folder)
                    continue
                _recipes.extend(generate_readme(folder, repodata, renderer))
            return _recipes

        def merge_chunk(_chunk, res):
            recipes.extend(res)

        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)

        logger.info("waiting for workers...")
        tasks.join()
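# --- Hedged sketch: imports these snippets appear to assume. The Sphinx
# and docutils utilities below exist under these paths in the Sphinx
# versions this code targets (status_iterator moved to
# sphinx.util.display in newer releases); Renderer, RepoData, RECIPE_DIR,
# BiocondaRepo, load_config, generate_readme and CONDA_FORGE_FORMAT are
# package-local names assumed to be defined elsewhere in the module. ---
import os
import os.path as op
import re
from typing import Any, Dict, List

from docutils import nodes
from sphinx.environment import BuildEnvironment
from sphinx.util import logging, status_iterator
from sphinx.util.nodes import make_refnode
from sphinx.util.parallel import ParallelTasks, make_chunks, parallel_available

logger = logging.getLogger(__name__)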
def resolve_xref(self, env: BuildEnvironment, fromdocname: str,
                 builder, role, target, node, contnode):
    """Resolve the ``pending_xref`` **node** with the given **role**
    and **target**."""
    for objtype in self.objtypes_for_role(role) or []:
        if (objtype, target) in self.data['objects']:
            node = make_refnode(builder, fromdocname,
                                self.data['objects'][objtype, target][0],
                                self.data['objects'][objtype, target][1],
                                contnode, target + ' ' + objtype)
            node.set_class('conda-package')
            return node
        if objtype == "package":
            channels = env.app.config.bioconda_other_channels
            for channel, urlformat in channels.items():
                if RepoData().get_package_data(channels=channel, name=target):
                    uri = urlformat.format(target)
                    node = nodes.reference('', '', internal=False,
                                           refuri=uri, classes=[channel])
                    node += contnode
                    return node
    return None  # triggers missing-reference
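# Hypothetical Sphinx conf.py entry matching the lookup above: each extra
# channel name maps to a URL format string with one '{}' slot for the
# package name. The channel and URL values here are illustrative only.
bioconda_other_channels = {
    'conda-forge': 'https://anaconda.org/conda-forge/{}',
}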
def resolve_xref(self, env: BuildEnvironment, fromdocname: str,
                 builder, typ, target, node, contnode):
    # docs copied from Domain class
    """Resolve the pending_xref *node* with the given *typ* and *target*.

    This method should return a new node, to replace the xref node,
    containing the *contnode* which is the markup content of the
    cross-reference.

    If no resolution can be found, None can be returned; the xref node
    will then be given to the :event:`missing-reference` event, and if
    that yields no resolution, replaced by *contnode*.

    The method can also raise :exc:`sphinx.environment.NoUri` to suppress
    the :event:`missing-reference` event being emitted.
    """
    if typ == 'depends':
        # 'depends' role is handled just like a 'package' here
        # (resolves the same)
        typ = 'package'
    elif typ == 'requiredby':
        # 'requiredby' role type is deferred to missing_references stage
        return None
    for objtype in self.objtypes_for_role(typ):
        if (objtype, target) in self.data['objects']:
            node = make_refnode(builder, fromdocname,
                                self.data['objects'][objtype, target][0],
                                self.data['objects'][objtype, target][1],
                                contnode, target + ' ' + objtype)
            node.set_class('conda-package')
            return node
        if objtype == "package":
            # Avoid going through the entire conda-forge repodata - we
            # cache a set of the packages available via conda-forge here.
            if not hasattr(env, 'conda_forge_packages'):
                pkgs = set(RepoData().get_package_data(
                    'name', channels='conda-forge'))
                env.conda_forge_packages = pkgs
            else:
                pkgs = env.conda_forge_packages
            if target in pkgs:
                uri = CONDA_FORGE_FORMAT.format(target)
                node = nodes.reference('', '', internal=False,
                                       refuri=uri, classes=['conda-forge'])
                node += contnode
                return node
    return None  # triggers missing-reference
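# Hedged sketch of the bookkeeping both resolve_xref variants rely on:
# self.data['objects'] is assumed to map (objtype, name) pairs to
# (docname, targetid) tuples, matching the make_refnode() call above.
# A recording helper of this shape (the name 'note_object' is
# illustrative) would be called while the domain's directives run:
def note_object(self, objtype: str, name: str, targetid: str) -> None:
    """Record an object so later cross-references can resolve to it."""
    self.data['objects'][objtype, name] = (self.env.docname, targetid)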
def generate_recipes(app):
    """
    Go through every folder in the `bioconda-recipes/recipes` dir,
    have a README.rst file generated and generate a recipes.rst from
    the collected data.
    """
    renderer = Renderer(app)
    repodata = RepoData()
    recipes = []
    recipe_dirs = os.listdir(RECIPE_DIR)

    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs, 'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            recipes.extend(generate_readme(folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes = []
            for folder in chunk:
                _recipes.extend(generate_readme(folder, repodata, renderer))
            return _recipes

        def merge_chunk(chunk, res):
            recipes.extend(res)

        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)

        logger.info("waiting for workers...")
        tasks.join()

    updated = renderer.render_to_file(
        "source/recipes.rst", "recipes.rst_t", {
            'recipes': recipes,
            # order of columns in the table; must be keys in template_options
            'keys': ['Package', 'Version', 'License', 'Linux', 'OSX']
        })
    if updated:
        logger.info("Updated source/recipes.rst")
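# In this version, generate_readme() is assumed to return a list of
# per-package dicts whose keys match the 'keys' column list handed to
# recipes.rst_t above; the values shown here are illustrative only.
example_recipe = {
    'Package': 'samtools',   # hypothetical package name
    'Version': '1.9',        # hypothetical version
    'License': 'MIT',
    'Linux': True,           # built for linux-64
    'OSX': True,             # built for osx-64
}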
def generate_recipes(app):
    """Generates recipe RST files

    - Checks out repository
    - Prepares `RepoData`
    - Selects recipes (if `BIOCONDA_FILTER_RECIPES` in environment)
    - Dispatches calls to `generate_readme` for each recipe
    - Removes old RST files
    """
    source_dir = app.env.srcdir
    doctree_dir = app.env.doctreedir  # .../build/doctrees
    repo_dir = op.join(op.dirname(app.env.srcdir), "_bioconda_recipes")
    recipe_basedir = op.join(repo_dir, app.config.bioconda_recipes_path)
    repodata_cache_file = op.join(doctree_dir, 'RepoDataCache.pkl')
    repo_config_file = os.path.join(repo_dir, app.config.bioconda_config_file)
    output_dir = op.join(source_dir, 'recipes')

    # Initialize Repo and point globals at the right place
    repo = BiocondaRepo(folder=repo_dir, home=app.config.bioconda_repo_url)
    repo.checkout_master()
    load_config(repo_config_file)
    logger.info("Preloading RepoData")
    repodata = RepoData()
    repodata.set_cache(repodata_cache_file)
    repodata.df  # pylint: disable=pointless-statement
    logger.info("Preloading RepoData (done)")

    # Collect recipe names
    recipe_dirs = os.listdir(recipe_basedir)
    if 'BIOCONDA_FILTER_RECIPES' in os.environ:
        limiter = os.environ['BIOCONDA_FILTER_RECIPES']
        try:
            recipe_dirs = recipe_dirs[:int(limiter)]
        except ValueError:
            match = re.compile(limiter)
            recipe_dirs = [recipe for recipe in recipe_dirs
                           if match.search(recipe)]

    # Set up renderer preparing recipe readme.rst files
    # (use a suffix check rather than rstrip(".git"), which strips
    # characters, not a suffix, and can eat trailing letters of the URL)
    base_url = app.config.bioconda_repo_url
    if base_url.endswith(".git"):
        base_url = base_url[:-len(".git")]
    recipe_base_url = "{base}/tree/master/{recipes}/".format(
        base=base_url, recipes=app.config.bioconda_recipes_path)
    renderer = Renderer(app, {'gh_recipes': recipe_base_url})

    recipes: List[str] = []

    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs, 'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            if not op.isdir(op.join(recipe_basedir, folder)):
                logger.error("Item '%s' in recipes folder is not a folder",
                             folder)
                continue
            recipes.extend(generate_readme(recipe_basedir, output_dir,
                                           folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes: List[str] = []
            for folder in chunk:
                if not op.isdir(op.join(recipe_basedir, folder)):
                    logger.error("Item '%s' in recipes folder is not a folder",
                                 folder)
                    continue
                _recipes.extend(generate_readme(recipe_basedir, output_dir,
                                                folder, repodata, renderer))
            return _recipes

        def merge_chunk(_chunk, res):
            recipes.extend(res)

        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)

        logger.info("waiting for workers...")
        tasks.join()

    # Remove RST files left over from recipes that no longer exist
    files_wanted = set(recipes)
    for root, dirs, files in os.walk(output_dir, topdown=False):
        for fname in files:
            path = op.join(root, fname)
            if path not in files_wanted:
                os.unlink(path)
        for dname in dirs:
            try:
                os.rmdir(op.join(root, dname))
            except OSError:
                pass
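# Usage note, inferred from the try/except above: an integer value
# truncates the directory listing, anything else is compiled as a regex
# and used to filter recipe names. E.g. (values illustrative):
#   BIOCONDA_FILTER_RECIPES=25 sphinx-build -b html source build
#   BIOCONDA_FILTER_RECIPES='^samtools' sphinx-build -b html source build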
def generate_recipes(app):
    """
    Go through every folder in the `ggd-recipes/recipes` dir,
    have a README.rst file generated and generate a recipes.rst from
    the collected data.
    """
    renderer = Renderer(app)
    load_config(os.path.join(os.path.dirname(RECIPE_DIR), "config.yaml"))
    repodata = RepoData()
    # Add ggd channels to repodata object
    #repodata.channels = ['ggd-genomics', 'conda-forge', 'bioconda', 'defaults']
    recipes = []

    # Get each folder that contains a meta.yaml file
    recipe_dirs = []
    for root, dirs, files in os.walk(RECIPE_DIR):
        if "meta.yaml" in files:
            recipe_dirs.append(root)

    if parallel_available and len(recipe_dirs) > 5:
        nproc = app.parallel
    else:
        nproc = 1

    if nproc == 1:
        for folder in status_iterator(
                recipe_dirs, 'Generating package READMEs...',
                "purple", len(recipe_dirs), app.verbosity):
            recipes.extend(generate_readme(folder, repodata, renderer))
    else:
        tasks = ParallelTasks(nproc)
        chunks = make_chunks(recipe_dirs, nproc)

        def process_chunk(chunk):
            _recipes = []
            for folder in chunk:
                _recipes.extend(generate_readme(folder, repodata, renderer))
            return _recipes

        def merge_chunk(chunk, res):
            recipes.extend(res)

        for chunk in status_iterator(
                chunks,
                'Generating package READMEs with {} threads...'.format(nproc),
                "purple", len(chunks), app.verbosity):
            tasks.add_task(process_chunk, chunk, merge_chunk)

        logger.info("waiting for workers...")
        tasks.join()

    updated = renderer.render_to_file(
        "source/recipes.rst", "recipes.rst_t", {
            'recipes': recipes,
            # order of columns in the table; must be keys in template_options
            'keys': ['Package', 'Version', 'Linux', 'OSX', 'NOARCH'],
            'noarch_symbol': '<i class="fa fa-desktop"></i>',
            'linux_symbol': '<i class="fa fa-linux"></i>',
            'osx_symbol': '<i class="fa fa-apple"></i>',
            'dot_symbol': '<i class="fa fa-dot-circle-o"></i>'
        })
    if updated:
        logger.info("Updated source/recipes.rst")
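# Hedged sketch of the Renderer.render_to_file contract assumed by the
# generate_recipes variants above: render a Jinja2 template with the
# given context (plus any extra context passed at construction), rewrite
# the target only if its content changed, and report whether it did.
# This is an illustration of the observed call sites, not the package's
# actual implementation; the 'templates' search path is an assumption.
import os
import jinja2

class Renderer:
    def __init__(self, app, extra_context=None):
        self.env = jinja2.Environment(
            loader=jinja2.FileSystemLoader('templates'))
        self.extra_context = extra_context or {}

    def render_to_file(self, file_name, template_name, context) -> bool:
        template = self.env.get_template(template_name)
        content = template.render({**self.extra_context, **context})
        if os.path.exists(file_name):
            with open(file_name) as old_file:
                if old_file.read() == content:
                    return False  # unchanged; leave mtime alone
        with open(file_name, 'w') as out_file:
            out_file.write(content)
        return True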