Example #1
0
def test__templater_jinja_large_file_check():
    """Test large file skipping.

    The check is separately called on each .process() method
    so it makes sense to test a few templaters.
    """
    # First check we can process the file normally without specific config.
    # i.e. check the defaults work and the default is high.
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    # Second check: setting the limit to 0 disables the check entirely,
    # so even this short string processes without a skip.
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(
            overrides={"dialect": "ansi", "large_file_skip_char_limit": 0}
        ),
    )
    # Finally check we raise a skip exception when config is set low
    # ("SELECT 1" is 8 chars, over the 2-char limit).
    with pytest.raises(SQLFluffSkipFile) as excinfo:
        JinjaTemplater().process(
            in_str="SELECT 1",
            fname="<string>",
            config=FluffConfig(
                overrides={"dialect": "ansi", "large_file_skip_char_limit": 2},
            ),
        )

    assert "Length of file" in str(excinfo.value)
Example #2
0
def test__templater_dbt_templating_test_lex(
        project_dir,
        dbt_templater,
        fname  # noqa: F811
):
    """Demonstrate the lexer works on both dbt models and dbt tests.

    Handle any number of newlines.
    """
    # Read the raw fixture and count how many newlines trail it.
    source_path = os.path.join(project_dir, fname)
    with open(source_path, "r") as handle:
        dbt_sql = handle.read()
    n_trailing_newlines = len(dbt_sql) - len(dbt_sql.rstrip("\n"))
    lexer = Lexer(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    templated_file, _ = dbt_templater.process(
        in_str="",
        fname=source_path,
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    tokens, lex_vs = lexer.lex(templated_file)
    # Both source and templated output should keep the trailing newlines.
    expected = "select a\nfrom table_a" + "\n" * n_trailing_newlines
    assert templated_file.source_str == expected
    assert templated_file.templated_str == expected
Example #3
0
def test_linter_noqa_disable():
    """Test "noqa" comments can be disabled via the config."""
    lntr_noqa_enabled = Linter(
        config=FluffConfig(overrides={"rules": "L012"})
    )
    lntr_noqa_disabled = Linter(
        config=FluffConfig(
            overrides={"disable_noqa": True, "rules": "L012"}
        )
    )
    # This query raises L012, but it is being suppressed by the inline noqa comment.
    # We can ignore this comment by setting disable_noqa = True in the config
    # or by using the --disable-noqa flag in the CLI.
    sql = """
    SELECT col_a a --noqa: L012
    FROM foo
    """

    # Verify that noqa works as expected with disable_noqa = False (default).
    enabled_violations = lntr_noqa_enabled.lint_string(sql).get_violations()
    assert len(enabled_violations) == 0

    # Verify that noqa comment is ignored with disable_noqa = True.
    disabled_violations = lntr_noqa_disabled.lint_string(sql).get_violations()
    assert len(disabled_violations) == 1
    assert disabled_violations[0].rule.code == "L012"
Example #4
0
def test__attempt_to_change_templater_warning(caplog):
    """Test warning when changing templater in .sqlfluff file in subdirectory."""
    initial_config = FluffConfig(
        configs={"core": {"templater": "jinja", "dialect": "ansi"}}
    )
    updated_config = FluffConfig(
        configs={"core": {"templater": "python", "dialect": "ansi"}}
    )
    lntr = Linter(config=initial_config)
    logger = logging.getLogger("sqlfluff")
    saved_propagate = logger.propagate
    try:
        # caplog only sees records if propagation is enabled.
        logger.propagate = True
        with caplog.at_level(logging.WARNING, logger="sqlfluff.linter"):
            lntr.render_string(
                in_str="select * from table",
                fname="test.sql",
                config=updated_config,
                encoding="utf-8",
            )
        assert "Attempt to set templater to " in caplog.text
    finally:
        # Always restore the original propagation setting.
        logger.propagate = saved_propagate
Example #5
0
def test__templater_dbt_templating_test_lex(in_dbt_project_dir, dbt_templater):  # noqa
    """A test to demonstrate _tests_as_models works on dbt tests by temporarily making them models."""
    lexer = Lexer(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    templated_file, _ = dbt_templater.process(
        in_str="",
        fname="tests/test.sql",
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    tokens, lex_vs = lexer.lex(templated_file)
    # For this fixture the templated output matches the source exactly.
    expected_sql = "select * from a"
    assert templated_file.source_str == expected_sql
    assert templated_file.templated_str == expected_sql
Example #6
0
def test__templater_jinja():
    """Test jinja templating and the treatment of whitespace."""
    templater = JinjaTemplateInterface(
        override_context=dict(blah="foo", condition="a < 10")
    )
    rendered, _ = templater.process(JINJA_STRING, config=FluffConfig())
    # Whitespace handling should leave exactly two trailing newlines.
    assert rendered == "SELECT * FROM f, o, o WHERE a < 10\n\n"
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing.

    Lexes the fixture, checks nothing was dropped, then parses and checks
    for unparsable sections.
    """
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    fs, lex_vs = FileSegment.from_raw(raw, config=config)
    # From just the initial parse, check we're all there
    assert fs.raw == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    # with caplog.at_level(logging.DEBUG):
    print(f"Pre-parse structure: {fs.to_tuple(show_raw=True)}")
    print(f"Pre-parse structure: {fs.stringify()}")
    with RootParseContext.from_config(
            config) as ctx:  # Optional: set recurse=1 to limit recursion
        parsed = fs.parse(parse_context=ctx)
    print(f"Post-parse structure: {fs.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {fs.stringify()}")
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs
Example #8
0
def test_linter_noqa_with_templating():
    """Similar to test_linter_noqa, but uses templating (Jinja).

    Bug fix: the SQL fragments below were previously separate (no-op)
    expression statements, so ``sql`` held only ``"\\n"`` and the test
    linted an empty query. Wrapping the adjacent string literals in
    parentheses makes Python concatenate them into the intended query.
    """
    lntr = Linter(
        config=FluffConfig(
            overrides={
                "dialect": "bigquery",  # Use bigquery to allow hash comments.
                "templater": "jinja",
                "rules": "L016",
            }
        )
    )
    sql = (
        "\n"
        # NOTE(review): the original fragment began with a stray '"' before
        # the Jinja tag — removed here as a scrape/paste artifact; confirm
        # against the upstream test.
        '{%- set a_var = ["1", "2"] -%}\n'
        "SELECT\n"
        "  this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
        "templated_sql_files, --noqa: L016\n"
        "  this_is_not_so_big a, --Inline comment --noqa: L012\n"
        "  this_is_not_so_big b, /* Block comment */ --noqa: L012\n"
        "  this_is_not_so_big c, # hash comment --noqa: L012\n"
        "  this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
        "templated_sql_files, --noqa: L01*\n"
        "FROM\n"
        "  a_table\n"
        "    "
    )
    # All long/short-name violations are suppressed by the inline noqa comments.
    result = lntr.lint_string(sql)
    assert not result.get_violations()
Example #9
0
def test__linter__path_from_paths__exts():
    """Test configuration of file discovery."""
    linter = Linter(config=FluffConfig(overrides={"sql_file_exts": ".txt"}))
    discovered = normalise_paths(linter.paths_from_path("test/fixtures/linter"))
    # With the extension overridden, only .txt files should be discovered.
    for excluded in (
        "test.fixtures.linter.passing.sql",
        "test.fixtures.linter.passing_cap_extension.SQL",
    ):
        assert excluded not in discovered
    assert "test.fixtures.linter.discovery_file.txt" in discovered
Example #10
0
def test__linter__skip_large_bytes(filesize, raises_skip):
    """Test that files over the byte-size limit are skipped.

    NOTE(review): the original docstring ("Test extracting paths from a
    file path.") appeared copy-pasted from another test; this one
    exercises ``large_file_skip_byte_limit``.
    """
    config = FluffConfig(overrides={
        "large_file_skip_byte_limit": filesize,
        "dialect": "ansi"
    })
    # First check the function directly
    if raises_skip:
        with pytest.raises(SQLFluffSkipFile) as excinfo:
            Linter._load_raw_file_and_config(
                "test/fixtures/linter/indentation_errors.sql", config)
        assert "Skipping" in str(excinfo.value)
        assert f"over the limit of {filesize}" in str(excinfo.value)
    # If NOT expected to raise, an unexpected SQLFluffSkipFile would
    # propagate out of the calls below and fail the test anyway.

    # Then check that it either is or isn't linted appropriately via lint_paths.
    lntr = Linter(config)
    result = lntr.lint_paths(
        ("test/fixtures/linter/indentation_errors.sql", ), )
    if raises_skip:
        # A skipped file yields no violations.
        assert not result.get_violations()
    else:
        # The fixture contains lint errors, so linting must find some.
        assert result.get_violations()

    # Same again via parse_path, which is the other entry point.
    result = list(
        lntr.parse_path("test/fixtures/linter/indentation_errors.sql", ))
    if raises_skip:
        assert not result
    else:
        assert result
Example #11
0
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing.

    Lexes the fixture, verifies nothing was dropped, then parses and
    checks for unparsable sections. An empty file must raise ValueError.
    """
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    if raw:
        parsed = Parser(config=config).parse(tokens)
        print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
        print(f"Post-parse structure: {parsed.stringify()}")
        # Check we're all there.
        assert parsed.raw == raw
        # Check that there's nothing unparsable
        typs = parsed.type_set()
        assert "unparsable" not in typs
    else:
        # If it's an empty file, check that we get a value exception
        # here. The linter handles this by *not* parsing the file,
        # but in this case, we explicitly want an error.
        with pytest.raises(ValueError):
            Parser(config=config).parse(tokens)
Example #12
0
def test__cli__formatters__violation(tmpdir):
    """Test formatting violations.

    NB Position is 1 + start_pos.
    """
    # Build a segment positioned on the third line of the templated file.
    s = RawSegment(
        "foobarbar",
        PositionMarker(
            slice(10, 19),
            slice(10, 19),
            TemplatedFile.from_string("      \n\n  foobarbar"),
        ),
    )
    r = RuleGhost("A", "DESC")
    v = SQLLintError(segment=s, rule=r)
    formatter = OutputStreamFormatter(
        FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False
    )
    f = formatter.format_violation(v)
    # Position is 3, 3 because foobarbar is on the third
    # line (i.e. it has two newlines preceding it) and
    # it's at the third position in that line (i.e. there
    # are two characters between it and the preceding
    # newline).
    assert escape_ansi(f) == "L:   3 | P:   3 |    A | DESC"
Example #13
0
def test__cli__formatters__filename_nocol(tmpdir):
    """Test formatting filenames."""
    output_stream = FileOutput(
        FluffConfig(require_dialect=False), str(tmpdir / "out.txt")
    )
    formatter = OutputStreamFormatter(output_stream, False)
    # A successful file renders as a PASS banner.
    rendered = formatter.format_filename("blahblah", success=True)
    assert escape_ansi(rendered) == "== [blahblah] PASS"
Example #14
0
def test__templater_dbt_sequence_files_ephemeral_dependency(
        project_dir,  # noqa: F811
        dbt_templater,  # noqa: F811
):
    """Test that dbt templater sequences files based on dependencies.

    The repeated path prefix is hoisted into ``model_dir`` to avoid
    constructing the same base path eight times.
    """
    model_dir = Path(project_dir) / "models" / "depends_on_ephemeral"
    result = dbt_templater.sequence_files(
        [str(model_dir / name) for name in ("a.sql", "b.sql", "c.sql", "d.sql")],
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    # c.sql should come first because b.sql depends on c.sql.
    assert result == [
        str(model_dir / name) for name in ("a.sql", "c.sql", "b.sql", "d.sql")
    ]
Example #15
0
def test__templater_jinja(instr, expected_outstr):
    """Test jinja templating and the treatment of whitespace."""
    templater = JinjaTemplater(override_context=dict(blah="foo", condition="a < 10"))
    rendered, _ = templater.process(
        in_str=instr,
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    # The rendered output must match the parametrized expectation.
    assert str(rendered) == expected_outstr
Example #16
0
def test_templater_set_block_handling():
    """Test handling of literals in {% set %} blocks.

    Specifically, verify they are not modified in the alternate template.
    """

    def run_query(sql):
        # Prior to the bug fix, this assertion failed. This was bad because,
        # inside JinjaTracer, dbt templates similar to the one in this test
        # would call the database with funky SQL (including weird strings it
        # uses internally like: 00000000000000000000000000000002.
        assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n"
        return sql

    templater = JinjaTemplater(override_context=dict(run_query=run_query))
    template = """{% set my_query1 %}
select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
{% endset %}
{% set my_query2 %}
{{ my_query1 }}
{% endset %}

{{ run_query(my_query2) }}
"""
    rendered, violations = templater.process(
        in_str=template,
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    expected = "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n"
    assert str(rendered) == expected
    assert len(violations) == 0
Example #17
0
def test_lint_path_parallel_wrapper_exception(patched_base_run):
    """Tests the error catching behavior of _lint_path_parallel_wrapper()."""
    patched_base_run.side_effect = ValueError("Something unexpected happened")
    outcome = runner.MultiProcessRunner._lint_path(Linter, FluffConfig(), "")
    # The wrapper must capture the error as a DelayedException rather
    # than letting it escape the worker; re-raising recovers it.
    assert isinstance(outcome, runner.DelayedException)
    with pytest.raises(ValueError):
        outcome.reraise()
Example #18
0
def test__templater_jinja_error_catatrophic():
    """Test error handling in the jinja templater."""
    # NOTE(review): "catatrophic" in the name is a typo for "catastrophic",
    # kept as-is because test names are referenced by the test runner.
    templater = JinjaTemplater(override_context=dict(blah=7))
    rendered, violations = templater.process(in_str=JINJA_STRING, config=FluffConfig())
    # Rendering should fail entirely and report at least one violation.
    assert not rendered
    assert len(violations) > 0
Example #19
0
def test_non_selects_unparseable(raw: str) -> None:
    """Test that non-SELECT commands are not parseable."""
    soql_linter = Linter(
        config=FluffConfig(configs={"core": {"dialect": "soql"}})
    )
    lint_result = soql_linter.lint_string(raw)
    # Exactly one violation, and it must be a parse error.
    assert len(lint_result.violations) == 1
    assert isinstance(lint_result.violations[0], SQLParseError)
Example #20
0
def test__templater_dbt_handle_exceptions(
        project_dir,
        dbt_templater,
        fname,
        exception_msg  # noqa: F811
):
    """Test that exceptions during compilation are returned as violations."""
    from dbt.adapters.factory import get_adapter

    src_fpath = "test/fixtures/dbt/error_models/" + fname
    target_fpath = os.path.abspath(
        os.path.join(project_dir, "models/my_new_project/", fname))
    # We move the file that throws an error in and out of the project directory
    # as dbt throws an error if a node fails to parse while computing the DAG
    os.rename(src_fpath, target_fpath)
    try:
        _, violations = dbt_templater.process(
            in_str="",
            fname=target_fpath,
            config=FluffConfig(configs=DBT_FLUFF_CONFIG),
        )
    finally:
        # Always release the adapter connection and restore the fixture,
        # even if processing raised.
        get_adapter(dbt_templater.dbt_config).connections.release()
        os.rename(target_fpath, src_fpath)
    assert violations
    # NB: Replace slashes to deal with different platform paths being returned.
    assert violations[0].desc().replace("\\", "/").startswith(exception_msg)
Example #21
0
def test_linter_noqa():
    """Test "noqa" feature at the higher "Linter" level."""
    lntr = Linter(
        config=FluffConfig(overrides={"rules": "L012"})
    )
    sql = """
    SELECT
        col_a a,
        col_b b, --noqa: disable=L012
        col_c c,
        col_d d, --noqa: enable=L012
        col_e e,
        col_f f,
        col_g g,  --noqa
        col_h h,
        col_i i, --noqa:L012
        col_j j,
        col_k k, --noqa:L013
        col_l l,
        col_m m,
        col_n n, --noqa: disable=all
        col_o o,
        col_p p --noqa: enable=all
    FROM foo
        """
    violations = lntr.lint_string(sql).get_violations()
    # Only the lines NOT covered by a noqa directive should report L012.
    expected_lines = {3, 6, 7, 8, 10, 12, 13, 14, 15, 18}
    assert expected_lines == {v.line_no for v in violations}
Example #22
0
def test_bigquery_relational_operator_parsing(data):
    """Tests queries with a diverse mixture of relational operators.

    Fixes: the local ``filter`` shadowed the builtin of the same name;
    renamed to ``conditions``. Also modernized ``.format()`` calls to
    f-strings (used elsewhere in this file) and fixed comment typos.
    """
    # Generate a simple SELECT query with relational operators and conjunctions
    # as specified in 'data'. Note the conjunctions are used as separators
    # between comparisons, so the conjunction in the first item is not used.
    conditions = []
    for i, (relation, conjunction) in enumerate(data):
        if i:
            conditions.append(f" {conjunction} ")
        conditions.append(f"a {relation} b")
    raw = f'SELECT * FROM t WHERE {"".join(conditions)}'
    note(f"query: {raw}")
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    parsed = Parser(config=config).parse(tokens)
    print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.stringify()}")
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs
Example #23
0
def test__templater_param_style(instr, expected_outstr, param_style, values):
    """Test different param_style templating."""
    # Merge the parameter values with the requested style.
    context = dict(values)
    context["param_style"] = param_style
    templater = PlaceholderTemplater(override_context=context)
    rendered, _ = templater.process(in_str=instr, fname="test", config=FluffConfig())
    assert str(rendered) == expected_outstr
Example #24
0
def test__templater_dbt_profiles_dir_expanded(dbt_templater):  # noqa
    """Check that the profiles_dir is expanded."""
    dbt_templater.sqlfluff_config = FluffConfig(
        configs={"templater": {"dbt": {"profiles_dir": "~/.dbt"}}}
    )
    # The "~" in the configured path should be expanded to the home dir.
    assert dbt_templater._get_profiles_dir() == os.path.expanduser("~/.dbt")
Example #25
0
def test__linter__skip_dbt_model_disabled(in_dbt_project_dir):  # noqa
    """Test that the linter skips disabled dbt models."""
    model_path = "models/my_new_project/disabled_model.sql"
    linter = Linter(config=FluffConfig(configs={"core": {"templater": "dbt"}}))
    linted_file = linter.lint_path(path=model_path).files[0]
    assert linted_file.path == model_path
    # A disabled model is never templated.
    assert not linted_file.templated_file
Example #26
0
def test__linter__lint_ephemeral_3_level(project_dir):  # noqa
    """Test linter can lint a project with 3-level ephemeral dependencies."""
    # This was previously crashing inside dbt, in a function named
    # inject_ctes_into_sql(). (issue 2671).
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    linter.lint_path(path=os.path.join(project_dir, "models/ephemeral_3_level"))
Example #27
0
def test__dialect__ansi__file_from_raw(raw, res, caplog):
    """Test we don't drop bits on simple examples."""
    ansi_config = FluffConfig(overrides=dict(dialect="ansi"))
    with caplog.at_level(logging.DEBUG):
        file_segment, _ = FileSegment.from_raw(raw, config=ansi_config)
    # Nothing should be lost between the raw input and the segment tree.
    assert file_segment.raw == raw
    assert file_segment.raw_list() == res
Example #28
0
def test__rules__std_file(rule, path, violations):
    """Test the linter finds the given errors in (and only in) the right places."""
    # Restrict the config to just the rule under test.
    rule_config = FluffConfig(overrides=dict(rules=rule))
    assert_rule_raises_violations_in_file(
        rule=rule,
        fpath=path,
        violations=violations,
        fluff_config=rule_config,
    )
Example #29
0
def test__templated_sections_do_not_raise_lint_error(in_dbt_project_dir,
                                                     fname):  # noqa
    """Test that the dbt test has only a new line lint error."""
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    linted = linter.lint_path(path="models/my_new_project/" + fname)
    checked = linted.check_tuples()
    print(checked)
    # No violations expected for these templated fixtures.
    assert len(checked) == 0
Example #30
0
def test__templated_sections_do_not_raise_lint_error(in_dbt_project_dir):  # noqa
    """Test that the dbt test has only a new line lint error."""
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    # NOTE(review): only ``fname`` is passed here (no ``in_str``) —
    # presumably lint_string resolves the content itself; confirm against
    # the Linter API of this sqlfluff version.
    result = linter.lint_string(fname="tests/test.sql")
    print(result.violations)
    assert len(result.violations) == 1
    # Newlines are removed by dbt templater
    assert result.violations[0].rule.code == "L009"