Example #1
0
def test_require_match_parse_grammar():
    """Verify Dialect.replace() rejects half-defined replacement segments.

    When the segment being replaced defines both match_grammar and
    parse_grammar, any replacement must define BOTH of them or NEITHER;
    supplying only one of the two should raise a ValueError.
    """
    ansi_dialect = load_raw_dialect("ansi")
    base_segment = ansi_dialect.get_segment("StatementSegment")

    # A replacement that supplies only match_grammar must be rejected.
    class MatchOnlySegment(base_segment):
        match_grammar = GreedyUntil(Ref("DelimiterSegment"))

    with pytest.raises(ValueError) as excinfo:
        ansi_dialect.replace(StatementSegment=MatchOnlySegment)
    assert "needs to define 'parse_grammar'" in str(excinfo.value)

    # Likewise, a replacement that supplies only parse_grammar is rejected.
    class ParseOnlySegment(base_segment):
        parse_grammar = GreedyUntil(Ref("DelimiterSegment"))

    with pytest.raises(ValueError) as excinfo:
        ansi_dialect.replace(StatementSegment=ParseOnlySegment)
    assert "needs to define 'match_grammar'" in str(excinfo.value)
Example #2
0
    OneOf,
    Delimited,
    Bracketed,
    AnyNumberOf,
    Ref,
    Anything,
    RegexLexer,
    CodeSegment,
    Indent,
    Dedent,
    OptionallyBracketed,
)

from sqlfluff.core.dialects import load_raw_dialect

# Build the Teradata dialect as a tweaked copy of the ANSI dialect.
ansi_dialect = load_raw_dialect("ansi")
teradata_dialect = ansi_dialect.copy_as("teradata")

# Widen the numeric literal matcher so that a bare trailing-dot number
# (e.g. "1.") is also lexed as a numeric literal.
teradata_dialect.patch_lexer_matchers(
    [RegexLexer("numeric_literal", r"([0-9]+(\.[0-9]*)?)", CodeSegment)]
)

# These keywords are promoted to reserved keywords in Teradata, so they
# must be removed from the unreserved set inherited from ANSI.
teradata_dialect.sets("unreserved_keywords").difference_update(
    ["UNION", "TIMESTAMP", "DATE"]
)
Example #3
0
    OneOf,
    Ref,
    Sequence,
    StartsWith,
    SymbolSegment,
    StringLexer,
    RegexLexer,
    CodeSegment,
    NewlineSegment,
    StringParser,
    NamedParser,
    RegexParser,
)
from sqlfluff.core.dialects import load_raw_dialect

# The Exasol "function/script" dialect is a specialised copy of the
# base Exasol dialect.
exasol_dialect = load_raw_dialect("exasol")
exasol_fs_dialect = exasol_dialect.copy_as("exasol_fs")
# ROWCOUNT is additionally usable as an unreserved keyword here.
exasol_fs_dialect.sets("unreserved_keywords").update(["ROWCOUNT"])

exasol_fs_dialect.insert_lexer_matchers(
    [
        StringLexer(
            "walrus_operator",
            ":=",
            CodeSegment,
            segment_kwargs={"type": "walrus_operator"},
        ),
        RegexLexer(
            "function_script_terminator",
            r";\s+\/(?!\*)|\s+\/$",
            CodeSegment,
Example #4
0
    Ref,
    Sequence,
    Bracketed,
    BaseSegment,
    Delimited,
    Nothing,
    OptionallyBracketed,
    Matchable,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.dialects.dialect_redshift_keywords import (
    redshift_reserved_keywords,
    redshift_unreserved_keywords,
)

postgres_dialect = load_raw_dialect("postgres")
ansi_dialect = load_raw_dialect("ansi")

# Redshift is derived from the Postgres dialect.
redshift_dialect = postgres_dialect.copy_as("redshift")

# Replace the inherited keyword sets wholesale with the Redshift-specific
# lists (each source string holds one keyword per line).
for set_name, keyword_source in (
    ("unreserved_keywords", redshift_unreserved_keywords),
    ("reserved_keywords", redshift_reserved_keywords),
):
    keyword_set = redshift_dialect.sets(set_name)
    keyword_set.clear()
    keyword_set.update(
        word.strip().upper() for word in keyword_source.split("\n")
    )

# Functions which may be called without parentheses.
bare_function_set = redshift_dialect.sets("bare_functions")
bare_function_set.clear()
bare_function_set.update(["current_date", "sysdate"])