def assert_rule_fail_in_sql(code, sql, configs=None):
    """Assert that a given rule does fail on the given sql.

    First checks that linting the raw sql raises at least one violation
    for `code`, then repeatedly applies the rule's fixes until no fixes
    remain, returning the fully-fixed raw sql string.
    """
    # `configs` allows overrides if we want to use them.
    cfg = FluffConfig(configs=configs)
    rule = get_rule_from_set(code, config=cfg)
    parsed, _, _ = Linter(config=cfg).parse_string(sql)
    print("Parsed:\n {0}".format(parsed.stringify()))
    lint_errors, _, _, _ = rule.crawl(
        parsed, dialect=cfg.get('dialect_obj'), fix=True)
    print("Errors Found: {0}".format(lint_errors))
    # The rule under test must actually fire on this sql.
    assert any(err.rule.code == code for err in lint_errors)
    # Use the parsed tree as our working buffer and fix iteratively.
    fix_buffer = parsed
    while True:
        # Re-crawl each pass WITHOUT re-asserting; asserting inside the
        # loop would fail once the errors have been fixed away.
        lint_errors, _, _, _ = rule.crawl(
            fix_buffer, dialect=cfg.get('dialect_obj'), fix=True)
        print("Errors Found: {0}".format(lint_errors))
        pending_fixes = [fx for err in lint_errors for fx in err.fixes]
        if not pending_fixes:
            print("Done")
            break
        print("Fixes to apply: {0}".format(pending_fixes))
        # Remember what we attempted so we can detect a stuck fix.
        attempted_fixes = pending_fixes
        fix_buffer, pending_fixes = fix_buffer.apply_fixes(pending_fixes)
        # If every fix came back unapplied, we'd loop forever - bail out.
        if pending_fixes and pending_fixes == attempted_fixes:
            raise RuntimeError(
                "Fixes aren't being applied: {0!r}".format(pending_fixes))
    return fix_buffer.raw
def test__dialect__ansi_specific_segment_parses(segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function
    although this will be a recursive parse and so the match
    function of SUBSECTIONS will be tested if present. The match
    function of the parent will not be tested.
    """
    # Set up the lexer
    config = FluffConfig(overrides={'dialect': 'ansi'})
    lexer = Lexer(config=config)
    ctx = ParseContext.from_config(config)
    # Lex the string for matching. For a good test, this would
    # arguably happen as a fixture, but it's easier to pass strings
    # as parameters than pre-lexed segment strings.
    segments, violations = lexer.lex(raw)
    assert not violations
    print(segments)
    # Get the segment class for matching
    seg_cls = config.get('dialect_obj').ref(segmentref)
    if not issubclass(seg_cls, BaseSegment):
        raise TypeError(
            "{0} is not of type Segment. Test is invalid.".format(segmentref))
    # RawSegment derivatives are matched directly; everything else goes
    # through a full recursive parse.
    if issubclass(seg_cls, RawSegment):
        print("Raw route...")
        with caplog.at_level(logging.DEBUG):
            match = seg_cls.match(segments=segments, parse_context=ctx)
        assert isinstance(match, MatchResult)
        assert len(match.matched_segments) == 1
        print(match)
        parsed = match.matched_segments[0]
        print(parsed)
    else:
        print("Base route...")
        # Construct an unparsed segment
        unparsed = seg_cls(segments, pos_marker=segments[0].pos_marker)
        # Perform the match (THIS IS THE MEAT OF THE TEST)
        with caplog.at_level(logging.DEBUG):
            parsed = unparsed.parse(parse_context=ctx)
        print(parsed)
        assert isinstance(parsed, seg_cls)
    # Check we get a good response
    print(parsed)
    print(type(parsed))
    # print(type(parsed._reconstruct()))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing un parsable
    assert 'unparsable' not in parsed.type_set()
def assert_rule_pass_in_sql(code, sql, configs=None):
    """Assert that a given rule doesn't fail on the given sql.

    Parses the sql, crawls it with the rule under test and asserts
    that no violation carrying `code` is raised.
    """
    # `configs` allows overrides if we want to use them.
    cfg = FluffConfig(configs=configs)
    rule = get_rule_from_set(code, config=cfg)
    parsed, _, _ = Linter(config=cfg).parse_string(sql)
    print("Parsed:\n {0}".format(parsed.stringify()))
    lint_errors, _, _, _ = rule.crawl(
        parsed, dialect=cfg.get('dialect_obj'), fix=True)
    print("Errors Found: {0}".format(lint_errors))
    # None of the reported violations may carry the rule's own code.
    assert all(err.rule.code != code for err in lint_errors)