def get_language(source, code, language_name=None):
    """
    Get the current language we're documenting, based on the extension.

    Resolution order: an explicitly forced ``language_name``, then the
    extension of ``source``, then a Pygments lexer guess over ``code``.
    Raises ValueError when no supported language can be determined.
    """
    if language_name is not None:
        # A forced language must match one of the supported entries exactly.
        forced = next((entry for entry in supported_languages.values()
                       if entry["name"] == language_name), None)
        if forced is None:
            raise ValueError(
                "Unknown forced language: {}".format(language_name))
        return forced

    if source:
        # Capture the trailing extension (e.g. ".py") of the file name.
        extension = re.match(r".*(\..+)", os.path.basename(source))
        if extension and extension.group(1) in supported_languages:
            return supported_languages[extension.group(1)]

    try:
        guessed = lexers.guess_lexer(code).name.lower()
        found = next((entry for entry in supported_languages.values()
                      if entry["name"] == guessed), None)
        if found is not None:
            return found
        raise ValueError()
    except ValueError:
        # If pygments can't find any lexers, it will raise its own
        # subclass of ValueError. We will catch it and raise ours
        # for consistency.
        raise ValueError("Can't figure out the language!")
def get_language(source, code, language_name=None):
    """
    Get the current language we're documenting, based on the extension.

    Tries, in order: the caller-forced ``language_name``, the extension of
    ``source``, and finally a Pygments lexer guess on ``code``.  Raises
    ValueError if none of these yields a supported language.
    """
    if language_name is not None:
        for candidate in supported_languages.values():
            if candidate["name"] == language_name:
                return candidate
        raise ValueError("Unknown forced language: {}".format(language_name))

    if source:
        extension_match = re.match(r'.*(\..+)', os.path.basename(source))
        if extension_match:
            ext = extension_match.group(1)
            if ext in supported_languages:
                return supported_languages[ext]

    try:
        guessed_name = lexers.guess_lexer(code).name.lower()
        for candidate in supported_languages.values():
            if candidate["name"] == guessed_name:
                return candidate
        # No supported language matched the guessed lexer name.
        raise ValueError()
    except ValueError:
        # If pygments can't find any lexers, it will raise its own
        # subclass of ValueError. We will catch it and raise ours
        # for consistency.
        raise ValueError("Can't figure out the language!")
def test_process(preserve_paths, index, choice):
    """Smoke-test pycco's process() pipeline with a chosen supported language."""
    names = [entry["name"] for entry in supported_languages.values()]
    lang_name = choice(names)
    p.process(
        [PYCCO_SOURCE],
        preserve_paths=preserve_paths,
        index=index,
        outdir=tempfile.gettempdir(),
        language=lang_name,
    )
l["comment_matcher"] = re.compile(r"^\s*{}\s?".format(comment_symbol)) # The dividing token we feed into Pygments, to delimit the boundaries between # sections. l["divider_text"] = "\n{}DIVIDER\n".format(comment_symbol) # The mirror of `divider_text` that we expect Pygments to return. We can split # on this to recover the original sections. l["divider_html"] = re.compile( r'\n*<span class="c[1]?">{}DIVIDER</span>\n*'.format(comment_symbol)) # Get the Pygments Lexer for this language. l["lexer"] = lexers.get_lexer_by_name(language_name) for entry in supported_languages.values(): compile_language(entry) def get_language(source, code, language_name=None): """ Get the current language we're documenting, based on the extension. """ if language_name is not None: for entry in supported_languages.values(): if entry["name"] == language_name: return entry else: raise ValueError( "Unknown forced language: {}".format(language_name))
def get_language(choice):
    """Pick one supported-language entry using the given choice callable."""
    entries = list(supported_languages.values())
    return choice(entries)
def test_process(preserve_paths, index, choice):
    """Run p.process over PYCCO_SOURCE for a randomly selected language."""
    lang_name = choice([entry["name"] for entry in supported_languages.values()])
    out_dir = tempfile.gettempdir()
    p.process([PYCCO_SOURCE], preserve_paths=preserve_paths, index=index,
              outdir=out_dir, language=lang_name)
def get_language(data):
    """Draw one supported-language entry from the hypothesis data strategy."""
    candidates = list(supported_languages.values())
    return data.draw(sampled_from(candidates))
def test_process(preserve_paths, index, data):
    """Process PYCCO_SOURCE with a language name drawn via hypothesis."""
    language_names = [entry["name"] for entry in supported_languages.values()]
    lang_name = data.draw(sampled_from(language_names))
    p.process([PYCCO_SOURCE], preserve_paths=preserve_paths, index=index,
              outdir=tempfile.gettempdir(), language=lang_name)
def get_language(choice):
    """Select one entry from the supported languages via *choice*."""
    available = [*supported_languages.values()]
    return choice(available)
# The dividing token we feed into Pygments, to delimit the boundaries between # sections. l["divider_text"] = "\n{}DIVIDER\n".format(comment_symbol) # The mirror of `divider_text` that we expect Pygments to return. We can split # on this to recover the original sections. l["divider_html"] = re.compile( r'\n*<span class="c[1]?">{}DIVIDER</span>\n*'.format(comment_symbol) ) # Get the Pygments Lexer for this language. l["lexer"] = lexers.get_lexer_by_name(language_name) for entry in supported_languages.values(): compile_language(entry) def get_language(source, code, language_name=None): """ Get the current language we're documenting, based on the extension. """ if language_name is not None: for entry in supported_languages.values(): if entry["name"] == language_name: return entry else: raise ValueError("Unknown forced language: {}".format(language_name)) if source: m = re.match(r'.*(\..+)', os.path.basename(source))