def test__cli__formatters__violation(tmpdir):
    """Test formatting violations.

    NB Position is 1 + start_pos.
    """
    segment = RawSegment(
        "foobarbar",
        PositionMarker(
            slice(10, 19),
            slice(10, 19),
            TemplatedFile.from_string(" \n\n foobarbar"),
        ),
    )
    violation = SQLLintError(segment=segment, rule=RuleGhost("A", "DESC"))
    formatter = OutputStreamFormatter(
        FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")),
        False,
    )
    formatted = formatter.format_violation(violation)
    # Position is 3, 3 because foobarbar is on the third line (i.e. it has
    # two newlines preceding it) and it's at the third position in that line
    # (i.e. there are two characters between it and the preceding newline).
    assert escape_ansi(formatted) == "L: 3 | P: 3 | A | DESC"
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    tokens, lex_violations = Lexer(config=config).lex(raw)
    # From just the initial lex, check we're all there and that
    # there were no lexing issues.
    assert "".join(token.raw for token in tokens) == raw
    assert not lex_violations
    if not raw:
        # If it's an empty file, check that we get a value exception
        # here. The linter handles this by *not* parsing the file,
        # but in this case, we explicitly want an error.
        with pytest.raises(ValueError):
            Parser(config=config).parse(tokens)
        return
    # Do the parse WITHOUT lots of logging - the logs get too long here
    # to be useful. We should use specific segment tests if we want to
    # debug logs.
    parsed = Parser(config=config).parse(tokens)
    print("Post-parse structure: {0}".format(parsed.to_tuple(show_raw=True)))
    print("Post-parse structure: {0}".format(parsed.stringify()))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable.
    assert "unparsable" not in parsed.type_set()
def test__cli__formatters__filename_nocol(tmpdir):
    """Test formatting filenames."""
    formatter = OutputStreamFormatter(
        FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")),
        False,
    )
    formatted = formatter.format_filename("blahblah", success=True)
    assert escape_ansi(formatted) == "== [blahblah] PASS"
def test__templater_dbt_sequence_files_ephemeral_dependency(
    project_dir,  # noqa: F811
    dbt_templater,  # noqa: F811
):
    """Test that dbt templater sequences files based on dependencies."""

    def model_path(name):
        # Absolute path to a model in the ephemeral-dependency fixture.
        return str(Path(project_dir) / "models" / "depends_on_ephemeral" / name)

    result = dbt_templater.sequence_files(
        [
            model_path("a.sql"),
            model_path("b.sql"),
            model_path("c.sql"),
            model_path("d.sql"),
        ],
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    # c.sql should come first because b.sql depends on c.sql.
    assert result == [
        model_path(name) for name in ("a.sql", "c.sql", "b.sql", "d.sql")
    ]
def test__templater_jinja(instr, expected_outstr):
    """Test jinja templating and the treatment of whitespace."""
    templater = JinjaTemplater(
        override_context=dict(blah="foo", condition="a < 10")
    )
    rendered, _ = templater.process(
        in_str=instr,
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    assert str(rendered) == expected_outstr
def test__linter__skip_large_bytes(filesize, raises_skip):
    """Test extracting paths from a file path."""
    config = FluffConfig(
        overrides={"large_file_skip_byte_limit": filesize, "dialect": "ansi"}
    )
    # First check the function directly.
    if raises_skip:
        with pytest.raises(SQLFluffSkipFile) as excinfo:
            Linter._load_raw_file_and_config(
                "test/fixtures/linter/indentation_errors.sql", config
            )
        assert "Skipping" in str(excinfo.value)
        assert f"over the limit of {filesize}" in str(excinfo.value)
    # If NOT raises, then we'll catch the raise an error and the test will fail.

    # Then check that it either is or isn't linted appropriately via lint_paths.
    linter = Linter(config)
    lint_result = linter.lint_paths(
        ("test/fixtures/linter/indentation_errors.sql",),
    )
    if raises_skip:
        assert not lint_result.get_violations()
    else:
        assert lint_result.get_violations()

    # Same again via parse_path, which is the other entry point.
    parse_result = list(
        linter.parse_path("test/fixtures/linter/indentation_errors.sql")
    )
    if raises_skip:
        assert not parse_result
    else:
        assert parse_result
def test__templater_jinja_error_catastrophic():
    """Test error handling in the jinja templater."""
    templater = JinjaTemplater(override_context=dict(blah=7))
    rendered, violations = templater.process(
        in_str=JINJA_STRING, fname="test", config=FluffConfig()
    )
    # A catastrophic failure yields no output, but does yield violations.
    assert not rendered
    assert len(violations) > 0
def test_non_selects_unparseable(raw: str) -> None:
    """Test that non-SELECT commands are not parseable."""
    linter = Linter(config=FluffConfig(configs={"core": {"dialect": "soql"}}))
    result = linter.lint_string(raw)
    # Exactly one violation, and it must be a parse error.
    assert len(result.violations) == 1
    assert isinstance(result.violations[0], SQLParseError)
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    file_segment, lex_violations = FileSegment.from_raw(raw, config=config)
    # From just the initial lex, check we're all there and that
    # there were no lexing issues.
    assert file_segment.raw == raw
    assert not lex_violations
    # Do the parse WITHOUT lots of logging - the logs get too long here
    # to be useful. We should use specific segment tests if we want to
    # debug logs.
    # with caplog.at_level(logging.DEBUG):
    print("Pre-parse structure: {0}".format(file_segment.to_tuple(show_raw=True)))
    print("Pre-parse structure: {0}".format(file_segment.stringify()))
    # Optional: set recurse=1 to limit recursion
    with RootParseContext.from_config(config) as ctx:
        parsed = file_segment.parse(parse_context=ctx)
    print("Post-parse structure: {0}".format(file_segment.to_tuple(show_raw=True)))
    print("Post-parse structure: {0}".format(file_segment.stringify()))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable.
    assert "unparsable" not in parsed.type_set()
def test_linter_noqa_with_templating():
    """Similar to test_linter_noqa, but uses templating (Jinja)."""
    linter = Linter(
        config=FluffConfig(
            overrides={
                "dialect": "bigquery",  # Use bigquery to allow hash comments.
                "templater": "jinja",
                "rules": "L016",
            }
        )
    )
    sql = (
        "\n"
        '"{%- set a_var = ["1", "2"] -%}\n'
        "SELECT\n"
        " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
        "templated_sql_files, --noqa: L016\n"
        " this_is_not_so_big a, --Inline comment --noqa: L012\n"
        " this_is_not_so_big b, /* Block comment */ --noqa: L012\n"
        " this_is_not_so_big c, # hash comment --noqa: L012\n"
        " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
        "templated_sql_files, --noqa: L01*\n"
        "FROM\n"
        " a_table\n"
        " "
    )
    result = linter.lint_string(sql)
    # Every potential violation is suppressed by a noqa comment.
    assert not result.get_violations()
def test__templater_jinja():
    """Test jinja templating and the treatment of whitespace."""
    templater = JinjaTemplateInterface(
        override_context=dict(blah="foo", condition="a < 10")
    )
    rendered, _ = templater.process(JINJA_STRING, config=FluffConfig())
    assert rendered == "SELECT * FROM f, o, o WHERE a < 10\n\n"
def test__linter__path_from_paths__exts():
    """Test configuration of file discovery."""
    linter = Linter(config=FluffConfig(overrides={"sql_file_exts": ".txt"}))
    paths = normalise_paths(linter.paths_from_path("test/fixtures/linter"))
    # Only files with the configured extension should be discovered.
    assert "test.fixtures.linter.passing.sql" not in paths
    assert "test.fixtures.linter.passing_cap_extension.SQL" not in paths
    assert "test.fixtures.linter.discovery_file.txt" in paths
def test_bigquery_relational_operator_parsing(data):
    """Tests queries with a diverse mixture of relational operators."""
    # Generate a simple SELECT query with relational operators and conjunctions
    # as specified in 'data'. Note the conjunctions are used as separators
    # between comparisons, so the conjunction in the first item is not used.
    # NB: Named `predicates` (not `filter`) to avoid shadowing the builtin.
    predicates = []
    for i, (relation, conjunction) in enumerate(data):
        if i:
            predicates.append(f" {conjunction} ")
        predicates.append(f"a {relation} b")
    raw = f'SELECT * FROM t WHERE {"".join(predicates)}'
    note(f"query: {raw}")
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues
    assert not lex_vs
    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    parsed = Parser(config=config).parse(tokens)
    print("Post-parse structure: {0}".format(parsed.to_tuple(show_raw=True)))
    print("Post-parse structure: {0}".format(parsed.stringify()))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs
def test_lint_path_parallel_wrapper_exception(patched_base_run):
    """Tests the error catching behavior of _lint_path_parallel_wrapper()."""
    patched_base_run.side_effect = ValueError("Something unexpected happened")
    result = runner.MultiProcessRunner._lint_path(Linter, FluffConfig(), "")
    # The worker should capture the exception rather than raising it...
    assert isinstance(result, runner.DelayedException)
    # ...and re-raise it on demand.
    with pytest.raises(ValueError):
        result.reraise()
def get_simple_config(
    dialect: Optional[str] = None,
    rules: Optional[List[str]] = None,
    exclude_rules: Optional[List[str]] = None,
    config_path: Optional[str] = None,
) -> FluffConfig:
    """Get a config object from simple API arguments.

    Args:
        dialect: Optional dialect name, validated before use.
        rules: Optional list of rule codes to enable.
        exclude_rules: Optional list of rule codes to exclude.
        config_path: Optional extra config path.

    Returns:
        A ``FluffConfig`` built from root config plus the given overrides.

    Raises:
        SQLFluffUserError: If the dialect is invalid/unknown or the config
            fails to load. The original exception is chained as the cause.
    """
    # Create overrides for simple API arguments.
    overrides = {}
    if dialect is not None:
        # Check the requested dialect exists and is valid.
        try:
            dialect_selector(dialect)
        except SQLFluffUserError as err:  # pragma: no cover
            # Chain the cause (PEP 3134) so the original error isn't lost.
            raise SQLFluffUserError(
                f"Error loading dialect '{dialect}': {str(err)}"
            ) from err
        except KeyError as err:
            # Chain the cause so the lookup failure context is preserved.
            raise SQLFluffUserError(
                f"Error: Unknown dialect '{dialect}'"
            ) from err
        overrides["dialect"] = dialect
    if rules is not None:
        overrides["rules"] = ",".join(rules)
    if exclude_rules is not None:
        overrides["exclude_rules"] = ",".join(exclude_rules)
    # Instantiate a config object.
    try:
        return FluffConfig.from_root(
            extra_config_path=config_path,
            ignore_local_config=True,
            overrides=overrides,
        )
    except SQLFluffUserError as err:  # pragma: no cover
        raise SQLFluffUserError(f"Error loading config: {str(err)}") from err
def test__templater_dbt_handle_exceptions(
    project_dir, dbt_templater, fname, exception_msg  # noqa: F811
):
    """Test that exceptions during compilation are returned as violation."""
    from dbt.adapters.factory import get_adapter

    source_path = (
        "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/" + fname
    )
    target_path = os.path.abspath(
        os.path.join(project_dir, "models/my_new_project/", fname)
    )
    # We move the file that throws an error in and out of the project directory
    # as dbt throws an error if a node fails to parse while computing the DAG
    os.rename(source_path, target_path)
    try:
        _, violations = dbt_templater.process(
            in_str="",
            fname=target_path,
            config=FluffConfig(
                configs=DBT_FLUFF_CONFIG, overrides={"dialect": "ansi"}
            ),
        )
    finally:
        get_adapter(dbt_templater.dbt_config).connections.release()
        os.rename(target_path, source_path)
    assert violations
    # NB: Replace slashes to deal with different plaform paths being returned.
    assert violations[0].desc().replace("\\", "/").startswith(exception_msg)
def test__templater_dbt_profiles_dir_expanded(dbt_templater):  # noqa
    """Check that the profiles_dir is expanded."""
    dbt_templater.sqlfluff_config = FluffConfig(
        configs={"templater": {"dbt": {"profiles_dir": "~/.dbt"}}}
    )
    # The '~' marker should be expanded to the user's home directory.
    assert dbt_templater._get_profiles_dir() == os.path.expanduser("~/.dbt")
def test__templater_param_style(instr, expected_outstr, param_style, values):
    """Test different param_style templating."""
    # Build the context from the supplied values, plus the style under test.
    context = dict(values)
    context["param_style"] = param_style
    templater = PlaceholderTemplater(override_context=context)
    rendered, _ = templater.process(
        in_str=instr, fname="test", config=FluffConfig()
    )
    assert str(rendered) == expected_outstr
def test__rules__std_L062_raised() -> None:
    """L062 is raised for use of blocked words with correct error message."""
    sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
    cfg = FluffConfig()
    cfg.set_value(
        config_path=["rules", "L062", "blocked_words"],
        val="myoldfunction,deprecated_table",
    )
    linter = Linter(config=cfg)
    records = linter.lint_string_wrapped(sql).as_records()
    violations = records[0]["violations"]
    # Both blocked words should be flagged, in source order.
    assert len(violations) == 2
    assert violations[0]["description"] == "Use of blocked word 'MYOLDFUNCTION'."
    assert violations[1]["description"] == "Use of blocked word 'deprecated_table'."
def test_linter_noqa():
    """Test "noqa" feature at the higher "Linter" level."""
    linter = Linter(config=FluffConfig(overrides={"rules": "L012"}))
    sql = """
SELECT
    col_a a,
    col_b b, --noqa: disable=L012
    col_c c,
    col_d d, --noqa: enable=L012
    col_e e,
    col_f f,
    col_g g, --noqa
    col_h h,
    col_i i, --noqa:L012
    col_j j,
    col_k k, --noqa:L013
    col_l l,
    col_m m,
    col_n n, --noqa: disable=all
    col_o o,
    col_p p --noqa: enable=all
FROM foo
"""
    result = linter.lint_string(sql)
    violations = result.get_violations()
    # Only the lines not covered by a noqa directive should be flagged.
    assert {3, 6, 7, 8, 10, 12, 13, 14, 15, 18} == {
        v.line_no for v in violations
    }
def test__templater_jinja_slice_file(raw_file, override_context, result, caplog):
    """Test slice_file."""
    templater = JinjaTemplater(override_context=override_context)
    env, live_context, make_template = templater.template_builder(
        config=FluffConfig.from_path(
            "test/fixtures/templater/jinja_slice_template_macros"
        )
    )
    templated_file = make_template(raw_file).render()
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"):
        _, sliced_file, _ = templater.slice_file(
            raw_file, templated_file, make_template=make_template
        )
    # Check contiguous on the TEMPLATED VERSION
    print(sliced_file)
    previous_slice = None
    for file_slice in sliced_file:
        print(file_slice)
        if previous_slice:
            assert file_slice[2].start == previous_slice.stop
        previous_slice = file_slice[2]
    # Check that all literal segments have a raw slice
    for file_slice in sliced_file:
        if file_slice[0] == "literal":
            assert file_slice[1] is not None
    # Check result
    actual = [
        (elem.slice_type, elem.source_slice, elem.templated_slice)
        for elem in sliced_file
    ]
    assert actual == result
def test_templater_set_block_handling():
    """Test handling of literals in {% set %} blocks.

    Specifically, verify they are not modified in the alternate template.
    """

    def run_query(sql):
        # Prior to the bug fix, this assertion failed. This was bad because,
        # inside JinjaTracer, dbt templates similar to the one in this test
        # would call the database with funky SQL (including weird strings it
        # uses internally like: 00000000000000000000000000000002.
        assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n"
        return sql

    templater = JinjaTemplater(override_context=dict(run_query=run_query))
    instr = """{% set my_query1 %}
select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
{% endset %}
{% set my_query2 %}
{{ my_query1 }}
{% endset %}

{{ run_query(my_query2) }}
"""
    rendered, violations = templater.process(
        in_str=instr,
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    assert (
        str(rendered)
        == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n"
    )
    assert len(violations) == 0
def test__templated_sections_do_not_raise_lint_error(in_dbt_project_dir, fname):  # noqa
    """Test that the dbt test has only a new line lint error."""
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    linted_path = linter.lint_path(path="models/my_new_project/" + fname)
    violations = linted_path.check_tuples()
    print(violations)
    # No violations are expected from the templated sections.
    assert len(violations) == 0
def test__dialect__ansi__file_from_raw(raw, res, caplog):
    """Test we don't drop bits on simple examples."""
    config = FluffConfig(overrides=dict(dialect="ansi"))
    with caplog.at_level(logging.DEBUG):
        file_segment, _ = FileSegment.from_raw(raw, config=config)
    # From just the initial parse, check we're all there
    assert file_segment.raw == raw
    assert file_segment.raw_list() == res
def test__rules__std_file(rule, path, violations):
    """Test the linter finds the given errors in (and only in) the right places."""
    # Configure the linter with only the rule under test enabled.
    config = FluffConfig(overrides=dict(rules=rule))
    assert_rule_raises_violations_in_file(
        rule=rule,
        fpath=path,
        violations=violations,
        fluff_config=config,
    )
def test__templated_sections_do_not_raise_lint_error(in_dbt_project_dir):  # noqa
    """Test that the dbt test has only a new line lint error."""
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    linted = linter.lint_string(fname="tests/test.sql")
    print(linted.violations)
    # Newlines are removed by dbt templater, so only L009 should fire.
    assert len(linted.violations) == 1
    assert linted.violations[0].rule.code == "L009"
def test__linter__skip_dbt_model_disabled(in_dbt_project_dir):  # noqa
    """Test that the linter skips disabled dbt models."""
    linter = Linter(config=FluffConfig(configs={"core": {"templater": "dbt"}}))
    linted_path = linter.lint_path(path="models/my_new_project/disabled_model.sql")
    linted_file = linted_path.files[0]
    assert linted_file.path == "models/my_new_project/disabled_model.sql"
    # A disabled model should never be templated.
    assert not linted_file.templated_file
def test__templater_jinja_error():
    """Test error handling in the jinja templater."""
    templater = JinjaTemplateInterface(override_context=dict(blah="foo"))
    rendered, violations = templater.process(JINJA_STRING, config=FluffConfig())
    assert rendered == "SELECT * FROM f, o, o WHERE \n\n"
    # Check we have violations.
    assert len(violations) > 0
def test__linter__lint_ephemeral_3_level(project_dir):  # noqa
    """Test linter can lint a project with 3-level ephemeral dependencies."""
    # This was previously crashing inside dbt, in a function named
    # inject_ctes_into_sql(). (issue 2671).
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    linter.lint_path(path=os.path.join(project_dir, "models/ephemeral_3_level"))
def test__parser__lexer_fail_via_parse():
    """Test how the parser fails and reports errors while lexing."""
    _, violations = FileSegment.from_raw("Select \u0394", config=FluffConfig())
    # Exactly one lex error should be reported, positioned at the
    # unlexable unicode character (offset 7).
    assert violations
    assert len(violations) == 1
    error = violations[0]
    assert isinstance(error, SQLLexError)
    assert error.pos_marker().char_pos == 7