Example #1
def assert_rule_fail_in_sql(code, sql, configs=None):
    """Assert that a given rule does fail on the given sql."""
    # The configs argument allows overrides if we want to use them.
    cfg = FluffConfig(configs=configs)
    r = get_rule_from_set(code, config=cfg)
    parsed, _, _ = Linter(config=cfg).parse_string(sql)
    print("Parsed:\n {0}".format(parsed.stringify()))
    lerrs, _, _, _ = r.crawl(parsed, dialect=cfg.get('dialect_obj'), fix=True)
    print("Errors Found: {0}".format(lerrs))
    assert any(v.rule.code == code for v in lerrs)
    fixed = parsed  # Use this as our buffer (a bit of a misnomer at this point).
    while True:
        # We get the errors again, but this time skip the assertion:
        # once fixes have been applied, the rule may (correctly) no
        # longer fail, so asserting on every loop would fail the test.
        lerrs, _, _, _ = r.crawl(fixed,
                                 dialect=cfg.get('dialect_obj'),
                                 fix=True)
        print("Errors Found: {0}".format(lerrs))
        fixes = []
        for e in lerrs:
            fixes += e.fixes
        if not fixes:
            print("Done")
            break
        print("Fixes to apply: {0}".format(fixes))
        l_fixes = fixes  # Save the fixes to compare to later
        fixed, fixes = fixed.apply_fixes(fixes)
        # iterate until all fixes applied
        if fixes:
            if fixes == l_fixes:
                raise RuntimeError(
                    "Fixes aren't being applied: {0!r}".format(fixes))

    return fixed.raw
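For illustration, a minimal sketch of how this helper might be invoked. The rule code and SQL below are hypothetical values, assuming a rule such as 'L010' (keyword capitalisation) would flag the mixed-case keywords:

# Hypothetical usage; 'L010' and the SQL string are illustrative, not
# taken from the examples on this page.
fixed_sql = assert_rule_fail_in_sql('L010', 'SELECT 1 from tbl')
print(fixed_sql)  # The raw SQL after all fixes have been applied.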
Example #2
def test__dialect__ansi_specific_segment_parses(segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function, not the MATCH function,
    although this will be a recursive parse and so the match
    function of SUBSECTIONS will be tested if present. The match
    function of the parent will not be tested.
    """
    # Set up the lexer
    config = FluffConfig(overrides=dict(dialect='ansi'))
    lex = Lexer(config=config)
    c = ParseContext.from_config(config)
    # Lex the string for matching. For a good test, this would
    # arguably happen as a fixture, but it's easier to pass strings
    # as parameters than pre-lexed segment strings.
    seg_list, vs = lex.lex(raw)
    assert not vs

    print(seg_list)
    # Get the segment class for matching
    Seg = config.get('dialect_obj').ref(segmentref)
    if not issubclass(Seg, BaseSegment):
        raise TypeError(
            "{0} is not of type Segment. Test is invalid.".format(segmentref))

    # This test is different if we're working with RawSegment
    # derivatives or not.
    if issubclass(Seg, RawSegment):
        print("Raw route...")
        with caplog.at_level(logging.DEBUG):
            parsed = Seg.match(segments=seg_list, parse_context=c)
        assert isinstance(parsed, MatchResult)
        assert len(parsed.matched_segments) == 1
        print(parsed)
        parsed = parsed.matched_segments[0]
        print(parsed)
    else:
        print("Base route...")
        # Construct an unparsed segment
        seg = Seg(seg_list, pos_marker=seg_list[0].pos_marker)
        # Perform the match (THIS IS THE MEAT OF THE TEST)
        with caplog.at_level(logging.DEBUG):
            parsed = seg.parse(parse_context=c)
        print(parsed)
        assert isinstance(parsed, Seg)

    # Check we get a good response
    print(parsed)
    print(type(parsed))
    # print(type(parsed._reconstruct()))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert 'unparsable' not in typs
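A sketch of how this test might be parametrized. The segment references below are assumptions about the ansi dialect, not values taken from the real fixture list:

import pytest

# Hypothetical parameters: each case pairs a dialect ref with a raw
# string that should parse as exactly that segment.
@pytest.mark.parametrize(
    "segmentref,raw",
    [
        ("SelectKeywordSegment", "select"),
        ("ObjectReferenceSegment", "a.b.c"),
    ],
)
def test__dialect__ansi_specific_segment_parses(segmentref, raw, caplog):
    ...  # Body as in the example above.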
Example #3
def assert_rule_pass_in_sql(code, sql, configs=None):
    """Assert that a given rule doesn't fail on the given sql."""
    # The configs argument allows overrides if we want to use them.
    cfg = FluffConfig(configs=configs)
    r = get_rule_from_set(code, config=cfg)
    parsed, _, _ = Linter(config=cfg).parse_string(sql)
    print("Parsed:\n {0}".format(parsed.stringify()))
    lerrs, _, _, _ = r.crawl(parsed, dialect=cfg.get('dialect_obj'), fix=True)
    print("Errors Found: {0}".format(lerrs))
    assert not any(v.rule.code == code for v in lerrs)
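The passing counterpart, under the same hypothetical assumption about rule 'L010':

# Consistent capitalisation should not trip the rule, so this returns
# without raising an AssertionError.
assert_rule_pass_in_sql('L010', 'SELECT 1 FROM tbl')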
Example #4
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    context = ParseContext.from_config(config)
    fs, lex_vs = FileSegment.from_raw(raw, config=config)
    # From just the initial parse, check we're all there
    assert fs.raw == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    # with caplog.at_level(logging.DEBUG):
    print("Pre-parse structure: {0}".format(fs.to_tuple(show_raw=True)))
    print("Pre-parse structure: {0}".format(fs.stringify()))
    parsed = fs.parse(
        parse_context=context)  # Optional: set recurse=1 to limit recursion
    print("Post-parse structure: {0}".format(fs.to_tuple(show_raw=True)))
    print("Post-parse structure: {0}".format(fs.stringify()))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert 'unparsable' not in typs
Example #5
def test__linter__path_from_paths__file():
    """Test extracting paths from a file path."""
    lntr = Linter(config=FluffConfig())
    paths = lntr.paths_from_path('test/fixtures/linter/indentation_errors.sql')
    assert normalise_paths(paths) == {
        'test.fixtures.linter.indentation_errors.sql'
    }
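normalise_paths is not shown on this page; a plausible sketch, assuming its only job is to make path separators comparable across platforms:

def normalise_paths(paths):
    # Replace both separator styles with dots so that the expected sets
    # compare equal on POSIX and Windows alike.
    return {pth.replace('/', '.').replace('\\', '.') for pth in paths}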
Example #6
def test__linter__lint_string_vs_file(path):
    """Test the linter finds the same things on strings and files."""
    with open(path, 'r') as f:
        sql_str = f.read()
    lntr = Linter(config=FluffConfig())
    assert (lntr.lint_string(sql_str).check_tuples() == lntr.lint_path(
        path).check_tuples())
Example #7
def test__templater_jinja_error_catatrophic():
    """Test error handling in the jinja templater."""
    t = JinjaTemplateInterface(override_context=dict(blah=7))
    instr = JINJA_STRING
    outstr, vs = t.process(instr, config=FluffConfig())
    assert not outstr
    assert len(vs) > 0
Example #8
def test__templater_jinja():
    """Test jinja templating and the treatment of whitespace."""
    t = JinjaTemplateInterface(
        override_context=dict(blah='foo', condition='a < 10'))
    instr = JINJA_STRING
    outstr, _ = t.process(instr, config=FluffConfig())
    assert outstr == 'SELECT * FROM f, o, o WHERE a < 10\n\n'
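JINJA_STRING itself is not shown on this page. A reconstruction consistent with the assertions in Examples 7 and 8 above and Example 12 below (it iterates over the characters of blah and substitutes condition) would be:

# Reconstructed fixture, inferred from the expected outputs rather than
# copied from the source.
JINJA_STRING = (
    'SELECT * FROM {% for c in blah %}{{c}}'
    '{% if not loop.last %}, {% endif %}{% endfor %} '
    'WHERE {{condition}}\n\n'
)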
Example #9
def test__parser__lexer_fail():
    """Test the how the lexer fails and reports errors."""
    lex = Lexer(config=FluffConfig())
    try:
        lex.lex("Select \u0394")
    except SQLLexError as err:
        assert err.pos_marker().char_pos == 7
Example #10
def test__parser__lexer_fail_via_parse():
    """Test the how the parser fails and reports errors while lexing."""
    _, vs = FileSegment.from_raw("Select \u0394", config=FluffConfig())
    assert vs
    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.pos_marker().char_pos == 7
Example #11
def test__dialect__ansi_specific_segment_not_parse(raw, err_locations, caplog):
    """Test queries do not parse, with parsing errors raised properly."""
    config = FluffConfig(overrides=dict(dialect='ansi'))
    lnt = Linter(config=config)
    _, vs, _ = lnt.parse_string(raw)
    assert len(vs) > 0
    locs = [(v.line_no(), v.line_pos()) for v in vs]
    assert locs == err_locations
Example #12
def test__templater_jinja_error():
    """Test error handling in the jinja templater."""
    t = JinjaTemplateInterface(override_context=dict(blah='foo'))
    instr = JINJA_STRING
    outstr, vs = t.process(instr, config=FluffConfig())
    assert outstr == 'SELECT * FROM f, o, o WHERE \n\n'
    # Check we have violations.
    assert len(vs) > 0
Example #13
def test__linter__path_from_paths__ignore(path):
    """Test extracting paths from a dot."""
    lntr = Linter(config=FluffConfig())
    paths = lntr.paths_from_path(path)
    # We should only get query_b, because of the sqlfluffignore files.
    assert normalise_paths(paths) == {
        'test.fixtures.linter.sqlfluffignore.path_b.query_b.sql'
    }
Example #14
def test__dialect__ansi__file_from_raw(raw, res, caplog):
    """Test we don't drop bits on simple examples."""
    config = FluffConfig(overrides=dict(dialect='ansi'))
    with caplog.at_level(logging.DEBUG):
        fs = FileSegment.from_raw(raw, config=config)
    # From just the initial parse, check we're all there
    assert fs.raw == raw
    assert fs.raw_list() == res
Example #15
def test__linter__path_from_paths__dir():
    """Test extracting paths from directories."""
    lntr = Linter(config=FluffConfig())
    paths = lntr.paths_from_path('test/fixtures/lexer')
    assert normalise_paths(paths) == {
        'test.fixtures.lexer.block_comment.sql',
        'test.fixtures.lexer.inline_comment.sql',
        'test.fixtures.lexer.basic.sql'
    }
Example #16
def test__rules__std_file(rule, path, violations):
    """Test the linter finds the given errors in (and only in) the right places."""
    # Use config to look for only the rule we care about.
    lntr = Linter(config=FluffConfig(overrides=dict(rules=rule)))
    lnt = lntr.lint_path(path)
    # Reformat the test data to match the format we're expecting. We use
    # sets because we don't care about ordering, only that exactly the
    # expected violations are present.
    assert set(lnt.check_tuples()) == {(rule, v[0], v[1]) for v in violations}
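A sketch of the parametrization this test expects. The fixture path is borrowed from Example 5; the rule code and (line, position) pairs are hypothetical:

import pytest

@pytest.mark.parametrize(
    "rule,path,violations",
    [
        # Hypothetical case: rule, fixture path, (line, position) pairs.
        ('L001', 'test/fixtures/linter/indentation_errors.sql', [(4, 24)]),
    ],
)
def test__rules__std_file(rule, path, violations):
    ...  # Body as in the example above.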
Example #17
def test__parser__lexer_fail():
    """Test the how the lexer fails and reports errors."""
    lex = Lexer(config=FluffConfig())

    _, vs = lex.lex("Select \u0394")

    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.pos_marker().char_pos == 7
Example #18
def test__linter__path_from_paths__dot():
    """Test extracting paths from a dot."""
    lntr = Linter(config=FluffConfig())
    paths = lntr.paths_from_path('.')
    # Use set theory to check that we get AT LEAST these files
    assert normalise_paths(paths) >= {
        'test.fixtures.lexer.block_comment.sql',
        'test.fixtures.lexer.inline_comment.sql',
        'test.fixtures.lexer.basic.sql'
    }
Example #19
def assert_structure(yaml_loader, path, code_only=True):
    """Check that a parsed sql file matches the yaml file with the same name."""
    lntr = Linter(config=FluffConfig())
    p = list(lntr.parse_path(path + '.sql'))
    parsed = p[0][0]
    if parsed is None:
        print(p)
        raise RuntimeError(p[0][1])
    # Whitespace is important here to test how that's treated
    tpl = parsed.to_tuple(code_only=code_only, show_raw=True)
    expected = yaml_loader(path + '.yml')
    assert tpl == expected
Example #20
def test__dialect__base_parse_struct(dialect, sqlfile, yamlfile, yaml_loader):
    """For given test examples, check parsed structure against yaml."""
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    context = ParseContext.from_config(config)
    # Load the SQL
    raw = load_file(dialect, sqlfile)
    fs, _ = FileSegment.from_raw(raw, config=config)
    # Load the YAML
    res = yaml_loader(make_dialect_path(dialect, yamlfile))
    # with caplog.at_level(logging.DEBUG):
    parsed = fs.parse(parse_context=context)
    assert parsed.to_tuple(code_only=True, show_raw=True) == res
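For reference, to_tuple(show_raw=True) produces a nested (type, contents) structure with raw strings at the leaves, and the YAML fixture mirrors that shape. A hand-written illustration with hypothetical node names:

# Illustrative only; not a real fixture.
expected = (
    'file', (
        ('statement', (
            ('select_statement', (
                ('select_clause', (
                    ('keyword', 'SELECT'),
                )),
            )),
        )),
    ),
)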
Example #21
def test__dialect__ansi_specific_segment_not_match(segmentref, raw, caplog):
    """Test that specific segments do not match.

    NB: We're testing the MATCH function, not the PARSE function.
    This is the opposite of the above.
    """
    config = FluffConfig(overrides=dict(dialect='ansi'))
    seg_list = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)
    c = ParseContext.from_config(config)

    with caplog.at_level(logging.DEBUG):
        match = Seg.match(segments=seg_list, parse_context=c)

    assert not match
Example #22
def test__rules__std_L003_process_raw_stack(generate_test_segments):
    """Test the _process_raw_stack function.

    Note: This test probably needs expanding. It doesn't
    really check enough of the full functionality.

    """
    cfg = FluffConfig()
    r = get_rule_from_set('L003', config=cfg)
    test_stack = generate_test_segments(
        ['bar', '\n', '     ', 'foo', 'baar', ' \t '])
    res = r._process_raw_stack(test_stack)
    print(res)
    assert sorted(res.keys()) == [1, 2]
    assert res[2]['indent_size'] == 5
Example #23
def test__dialect__ansi_specific_segment_parses(segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function, not the MATCH function,
    although this will be a recursive parse and so the match
    function of SUBSECTIONS will be tested if present. The match
    function of the parent will not be tested.
    """
    config = FluffConfig(overrides=dict(dialect='ansi'))
    seg_list = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    # This test is different if we're working with RawSegment
    # derivatives or not.
    if issubclass(Seg, RawSegment):
        print("Raw route...")
        with RootParseContext.from_config(config) as ctx:
            with caplog.at_level(logging.DEBUG):
                parsed = Seg.match(segments=seg_list, parse_context=ctx)
        assert isinstance(parsed, MatchResult)
        assert len(parsed.matched_segments) == 1
        print(parsed)
        parsed = parsed.matched_segments[0]
        print(parsed)
    else:
        print("Base route...")
        # Construct an unparsed segment
        seg = Seg(seg_list, pos_marker=seg_list[0].pos_marker)
        # Perform the match (THIS IS THE MEAT OF THE TEST)
        with RootParseContext.from_config(config) as ctx:
            with caplog.at_level(logging.DEBUG):
                parsed = seg.parse(parse_context=ctx)
        print(parsed)
        assert isinstance(parsed, Seg)

    # Check we get a good response
    print(parsed)
    print(type(parsed))
    # print(type(parsed._reconstruct()))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert 'unparsable' not in typs
Example #24
def test__config__nested_config_tests():
    """Test linting with overriden config in nested paths.

    This looks like a linter test but it's actually a config
    test.
    """
    lntr = Linter(config=FluffConfig(overrides=dict(exclude_rules='L002')))
    lnt = lntr.lint_path('test/fixtures/config/inheritance_b')
    violations = lnt.check_tuples(by_path=True)
    for k in violations:
        if k.endswith('nested\\example.sql'):
            assert ('L003', 1, 4) in violations[k]
            assert ('L009', 1, 12) in violations[k]
            assert 'L002' not in [c[0] for c in violations[k]]
        elif k.endswith('inheritance_b\\example.sql'):
            assert ('L003', 1, 4) in violations[k]
            assert 'L002' not in [c[0] for c in violations[k]]
            assert 'L009' not in [c[0] for c in violations[k]]
Example #25
def test__dialect__base_file_parse(dialect, file, caplog):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect=dialect))
    context = ParseContext.from_config(config)
    fs = FileSegment.from_raw(raw, config=config)
    # From just the initial parse, check we're all there
    assert fs.raw == raw
    # Do the parse with lots of logging
    with caplog.at_level(logging.DEBUG):
        logging.debug("Pre-parse structure: {0}".format(fs.to_tuple(show_raw=True)))
        logging.debug("Pre-parse structure: {0}".format(fs.stringify()))
        parsed = fs.parse(parse_context=context)  # Optional: set recurse=1 to limit recursion
        logging.debug("Post-parse structure: {0}".format(fs.to_tuple(show_raw=True)))
        logging.debug("Post-parse structure: {0}".format(fs.stringify()))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert 'unparsable' not in typs
Example #26
def test__linter__path_from_paths__not_exist():
    """Test extracting paths from a file path."""
    lntr = Linter(config=FluffConfig())
    with pytest.raises(IOError):
        lntr.paths_from_path('asflekjfhsakuefhse')
Example #27
def test__parser__lexer_obj(raw, res, caplog):
    """Test the lexer splits as expected in a selection of cases."""
    lex = Lexer(config=FluffConfig())
    with caplog.at_level(logging.DEBUG):
        lexing_segments, _ = lex.lex(raw)
        assert [seg.raw for seg in lexing_segments] == res
Example #28
File: sql.py  Project: ry-v1/lms
def initialize(self):
    self._app = Linter(config=FluffConfig.from_root())
Example #29
def test__linter__path_from_paths__not_exist_ignore():
    """Test extracting paths from a file path."""
    lntr = Linter(config=FluffConfig())
    paths = lntr.paths_from_path('asflekjfhsakuefhse',
                                 ignore_non_existent_files=True)
    assert len(paths) == 0
Example #30
def auto_fix_test(rules, dialect, folder):
    """A test for roundtrip testing, take a file buffer, lint, fix and lint.

    This is explicitly different from the linter version of this, in that
    it uses the command line rather than the direct api.
    """
    filename = 'testing.sql'
    # Let's get the path of a file to use
    tempdir_path = tempfile.mkdtemp()
    filepath = os.path.join(tempdir_path, filename)
    cfgpath = os.path.join(tempdir_path, '.sqlfluff')
    src_filepath = os.path.join(*base_auto_fix_path, dialect, folder,
                                'before.sql')
    cmp_filepath = os.path.join(*base_auto_fix_path, dialect, folder,
                                'after.sql')
    vio_filepath = os.path.join(*base_auto_fix_path, dialect, folder,
                                'violations.json')
    cfg_filepath = os.path.join(*base_auto_fix_path, dialect, folder,
                                '.sqlfluff')
    # Copy the content of the example file into the temporary file
    print_buff = ''
    with open(filepath, mode='w') as dest_file:
        with open(src_filepath, mode='r') as source_file:
            for line in source_file:
                dest_file.write(line)
                print_buff += line
    # Copy the config file too
    try:
        with open(cfgpath, mode='w') as dest_file:
            with open(cfg_filepath, mode='r') as source_file:
                for line in source_file:
                    dest_file.write(line)
    except FileNotFoundError:
        # No config file? No biggie
        pass
    print("## Input file:\n{0}".format(print_buff))
    # Do we need to do a violations check?
    try:
        with open(vio_filepath, mode='r') as vio_file:
            violations = json.load(vio_file)
    except FileNotFoundError:
        # No violations file. Let's not worry
        violations = None

    # Run the fix command
    cfg = FluffConfig.from_root(overrides=dict(rules=rules, dialect=dialect))
    lnt = Linter(config=cfg, output_func=lambda m: None)
    res = lnt.lint_path(filepath, fix=True)

    # If we have a violations structure, let's enforce it.
    if violations:
        vs = set(res.check_tuples())
        # Format the violations file
        expected_vs = set()
        for rule_key in violations["violations"]["linting"]:
            for elem in violations["violations"]["linting"][rule_key]:
                expected_vs.add((rule_key, *elem))
        assert expected_vs == vs

    # Actually do the fixes
    res = do_fixes(lnt, res)
    # Read the fixed file
    with open(filepath, mode='r') as fixed_file:
        fixed_buff = fixed_file.read()
    # Clean up once read
    shutil.rmtree(tempdir_path)
    # Read the comparison file
    with open(cmp_filepath, mode='r') as comp_file:
        comp_buff = comp_file.read()

    # Make sure we were successful
    assert res
    # Assert that we fixed as expected
    assert fixed_buff == comp_buff
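do_fixes is not shown on this page. A minimal sketch, assuming the lint result exposes a persist_changes method that writes fixes back to disk and reports per-file success (an assumption, not a confirmed API):

def do_fixes(lnt, res):
    # Persist the fixes back to the files on disk.
    # NOTE: persist_changes and its return shape are assumed here.
    changes = res.persist_changes()
    print('Persisted changes: {0}'.format(changes))
    # Report success only if every file persisted cleanly.
    return all(changes.values())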