예제 #1
0
    def check_file(
        self,
        filename: PathLike,
        extension: Optional[str] = None,
        newline: Optional[str] = '\n',
        **kwargs,
    ):
        r"""
        Check the content of the given text file against the reference file.

        :param filename: The file whose content should be checked.
        :param extension: The extension of the reference file.
            If :py:obj:`None` the extension is determined from ``filename``.
        :param newline: Controls how universal newlines mode works. See :func:`open`.
        :param \*\*kwargs: Additional keyword arguments passed to
            :meth:`pytest_regressions.file_regression.FileRegressionFixture.check`.

        .. seealso:: :func:`~.check_file_output`
        """

        __tracebackhide__ = True

        target = PathPlus(filename)
        data = target.read_text(encoding="UTF-8")

        # Fall back to the file's own suffix when no extension is given.
        file_extension = extension or target.suffix

        # ``.py`` reference files would be collected as tests; use a stand-in extension.
        if file_extension == ".py":
            file_extension = "._py_"

        return self.check(data, extension=file_extension, newline=newline, **kwargs)
예제 #2
0
def validate_files(
    schemafile: PathLike,
    *datafiles: PathLike,
    encoding: str = "utf-8",
) -> None:
    r"""
    Validate the given datafiles against the given schema.

    :param schemafile: The ``json`` or ``yaml`` formatted schema to validate with.
    :param \*datafiles: The ``json`` or ``yaml`` files to validate.
    :param encoding: Encoding to open the files with.

    .. versionadded:: 0.4.0
    """

    parser = YAML(typ="safe", pure=True)
    schema = parser.load(pathlib.Path(schemafile).read_text(encoding=encoding))

    # The format checker is stateless, so one instance serves every document.
    format_checker = jsonschema.FormatChecker()

    for datafile in datafiles:
        content = pathlib.Path(datafile).read_text(encoding=encoding)

        for document in parser.load_all(content):
            try:
                jsonschema.validate(document, schema, format_checker=format_checker)
            except jsonschema.exceptions.ValidationError as e:
                # Record which file failed before propagating the error.
                e.filename = str(datafile)
                raise e
예제 #3
0
            def check_fn(obtained_filename: PathPlus,
                         expected_filename: PathLike):
                # Render the expected file as a Jinja2 template (with custom
                # delimiters) so it can reference the current tool versions,
                # then compare the obtained output against the rendered result.
                expected_path = PathPlus(expected_filename)

                delimiters = dict(
                    block_start_string="<%",
                    block_end_string="%>",
                    variable_start_string="<<",
                    variable_end_string=">>",
                    comment_start_string="<#",
                    comment_end_string="#>",
                )
                template = Template(expected_path.read_text(), **delimiters)

                rendered = template.render(
                    sphinx_version=sphinx.version_info,
                    python_version=sys.version_info,
                    docutils_version=docutils_version,
                    **jinja2_namespace or {},
                )
                expected_path.write_text(rendered)

                return check_text_files(obtained_filename, expected_path, encoding="UTF-8")
예제 #4
0
    def load_file(self, filename: PathLike) -> Union[Dict, List]:
        """
        Load the given YAML file and return its contents.

        :param filename: The file to load.
        """

        return self.load(PathPlus(filename).read_text())
예제 #5
0
	def bump_version_for_file(self, filename: PathLike, config: BumpversionFileConfig):
		"""
		Bumps the version for the given file.

		:param filename: Path to the file, relative to the repository root.
		:param config: Provides the ``search`` and ``replace`` strings for the substitution.
		"""

		target = self.repo.target_repo / filename
		content = target.read_text()
		target.write_text(content.replace(config["search"], config["replace"]))
예제 #6
0
def check_and_add_all(filename: PathLike, quote_type: str = '"') -> int:
	"""
	Check the given filename for the presence of a ``__all__`` declaration, and add one if none is found.

	:param filename: The filename of the Python source file (``.py``) to check.
	:param quote_type: The type of quote to use for strings.

	:returns:

	* ``0`` if the file already contains a ``__all__`` declaration,
	  has no function or class definitions, or has a ``  # noqa: DALL000  ` comment.
	* ``1`` If ``__all__`` is absent.
	* ``4`` if an error was encountered when parsing the file.

	.. versionchanged:: 0.2.0

		Now returns ``0`` and doesn't add ``__all__`` if the file contains a ``noqa: DALL000`` comment.
	"""

	# The quote character NOT chosen; occurrences of it in the repr of the
	# members list are replaced with the requested quote type.
	quotes = {"'", '"'}
	quotes.remove(quote_type)
	bad_quote, *_ = tuple(quotes)

	filename = PathPlus(filename)

	try:
		source = filename.read_text()

		# A ``# noqa: DALL000`` comment anywhere in the file suppresses the check.
		for line in source.splitlines():
			noqas = find_noqa(line)
			if noqas is not None and noqas["codes"]:
				noqa_list: List[str] = noqas["codes"].rstrip().upper().split(',')
				if "DALL000" in noqa_list:
					return 0

		tree = ast.parse(source)
		if sys.version_info < (3, 8):  # pragma: no cover (py38+)
			# Older Pythons lack end line numbers on AST nodes; add them.
			mark_text_ranges(tree, source)

	except SyntaxError:
		# Fix: include the offending filename in the message (the f-string
		# previously contained no placeholder).
		stderr_writer(Fore.RED(f"'{filename}' does not appear to be a valid Python source file."))
		return 4

	visitor = Visitor(use_endlineno=True)
	visitor.visit(tree)

	if visitor.found_all:
		return 0

	# ``__all__`` goes after the module docstring and the last top-level import.
	docstring_start = (get_docstring_lineno(tree) or 0) - 1
	docstring = ast.get_docstring(tree, clean=False) or ''
	docstring_end = len(docstring.split('\n')) + docstring_start

	insertion_position = max(docstring_end, visitor.last_import) + 1

	if not visitor.members:
		# No public functions or classes to export.
		return 0

	members = repr(sorted(visitor.members)).replace(bad_quote, quote_type)

	# Reuse the already-read source rather than reading the file a second time.
	lines = source.split('\n')

	# Ensure there don't end up too many lines
	if lines[insertion_position].strip():
		lines.insert(insertion_position, '\n')
	else:
		lines.insert(insertion_position, '')

	lines.insert(insertion_position, f"__all__ = {members}")

	filename.write_clean('\n'.join(lines))

	return 1
예제 #7
0
def get_tokens(filename: PathLike) -> typing.Counter[str]:
    """
	Returns a :class:`collections.Counter` of the tokens in a file.

	Comments, insignificant whitespace, quoting characters and most
	punctuation are excluded from the count.

	:param filename: The file to parse.

	:return: A count of words etc. in the file.
	"""

    total: typing.Counter[str] = Counter()

    filename = PathPlus(filename)

    try:
        lex = pygments.lexers.get_lexer_for_filename(filename)
    except pygments.util.ClassNotFound:
        # No lexer is registered for this file type; nothing to count.
        return total

    for token in lex.get_tokens(filename.read_text()):
        if token[0] in pygments.token.Comment:
            continue

        if token[0] in pygments.token.Text:
            if token[1] == '\n':
                continue
            if token[1] == ' ':
                continue
            if re.match(r"^\t*$", token[1]):
                continue
            if re.match(r"^\s*$", token[1]):
                continue

        if token[0] in pygments.token.String:
            if token[1] == '"':
                continue

            if token[0] in pygments.token.String.Escape:
                # NOTE(review): r"\\*" matches the empty string, so this is
                # always true and EVERY escape token is skipped. If only pure
                # backslash runs were meant, the pattern should be r"^\\+$" —
                # left as-is to preserve existing behaviour; confirm intent.
                if re.match(r"\\*", token[1]):
                    continue

        if token[0] in pygments.token.String.Double:
            if token[1] == '\n':
                continue
            if re.match(r'^"*$', token[1]):
                continue

        if token[0] in pygments.token.String.Single:
            if token[1] == '\n':
                continue
            if re.match(r"^'*$", token[1]):
                continue

        if token[0] in pygments.token.Punctuation and token[1] in "[],{}:();":
            continue

        if token[0] in pygments.token.Operator:
            continue

        if token[0] in pygments.token.String.Affix:
            continue

        if token[0] in pygments.token.String.Interpol and token[1] in "{}":
            continue

        if re.match("^:*$", token[1]):
            continue

        # Fix: use .update() rather than ``+=`` — Counter.__iadd__ rescans the
        # whole accumulated counter on every token (accidentally quadratic);
        # update() is linear and gives the same result as all counts are positive.
        total.update(re.split("[ \n\t]", token[1]))

    # '' and ' ' arise from the whitespace split above; drop them along with
    # any token that is made up entirely of punctuation characters.
    punctuation_to_delete = ['', ' ']

    for word in total:
        if re.match(f"^[{punctuation}]+$", word):
            punctuation_to_delete.append(word)

    for word in punctuation_to_delete:
        del total[word]

    all_words: typing.Counter[str] = Counter()

    # Merge trailing-colon variants (e.g. labels) into the bare word.
    for word in total:
        if word.endswith(':'):
            all_words[word.rstrip(':')] = total[word]
        else:
            all_words[word] = total[word]

    return all_words