Beispiel #1
0
def group_as(tlist):
    """Group tokens around an 'AS' keyword into sql.Identifier aliases."""
    def left_ok(tk):
        # Keywords can't sit left of AS, with NULL as the lone exception.
        return not imt(tk, t=T.Keyword) or tk.value == 'NULL'

    def right_ok(tk):
        # DML/DDL keywords cannot serve as the alias target.
        return not imt(tk, t=(T.DML, T.DDL))

    _group_left_right(tlist, (T.Keyword, 'AS'), sql.Identifier,
                      valid_left=left_ok, valid_right=right_ok)
    def _get_table(tlist: TokenList) -> Optional[Table]:
        """
        Return the table if valid, i.e., conforms to the
        [[catalog.]schema.]table construct.

        :param tlist: The SQL tokens
        :returns: The table if the name conforms
        """

        # Drop any alias: it begins at the first whitespace token.
        cutoff = len(tlist.tokens)
        if tlist.has_alias():
            ws_idx, _ = tlist.token_next_by(t=Whitespace)
            if ws_idx != -1:
                cutoff = ws_idx

        parts = tlist.tokens[:cutoff]

        # A valid name has 1, 3 or 5 tokens: name-like tokens on even
        # slots, separated by "." punctuation on odd slots.
        names_ok = all(imt(tok, t=[Name, String]) for tok in parts[::2])
        dots_ok = all(
            imt(tok, m=(Punctuation, ".")) for tok in parts[1::2])
        if len(parts) in (1, 3, 5) and names_ok and dots_ok:
            # Reversed step slice so Table gets (table[, schema[, catalog]]).
            return Table(
                *[remove_quotes(tok.value) for tok in parts[::-2]])

        return None
    def __get_full_name(tlist: TokenList) -> Optional[str]:
        """
        Return the full unquoted table name if valid, i.e., conforms to
        the [[cluster.]schema.]table construct.

        :param tlist: The SQL tokens
        :returns: The valid full table name
        """

        # Ignore any trailing alias; it starts at the first whitespace.
        cutoff = len(tlist.tokens)
        if tlist.has_alias():
            ws_idx, _ = tlist.token_next_by(t=Whitespace)
            if ws_idx != -1:
                cutoff = ws_idx

        parts = tlist.tokens[:cutoff]

        # Expect name tokens at even slots joined by "." at odd slots.
        if (len(parts) in (1, 3, 5)
                and all(imt(tok, t=[Name, String])
                        for tok in parts[0::2])
                and all(imt(tok, m=(Punctuation, "."))
                        for tok in parts[1::2])):
            return ".".join(
                remove_quotes(tok.value) for tok in parts[0::2])

        return None
Beispiel #4
0
def group_period(tlist):
    """Join tokens around '.' punctuation into a single sql.Identifier."""
    def left_ok(tk):
        return imt(tk, i=(sql.SquareBrackets, sql.Identifier),
                   t=(T.Name, T.String.Symbol,))

    def right_ok(tk):
        return imt(tk, i=(sql.SquareBrackets, sql.Function),
                   t=(T.Name, T.String.Symbol, T.Wildcard))

    _group_left_right(tlist, (T.Punctuation, '.'), sql.Identifier,
                      valid_left=left_ok, valid_right=right_ok)
Beispiel #5
0
 def get_parameters(self):
     """Return a list of parameters."""
     # The argument list lives inside the trailing parenthesis token.
     parens = self.tokens[-1]
     for tok in parens.tokens:
         if imt(tok, i=IdentifierList):
             # Several parameters: hand back the identifiers themselves.
             return tok.get_identifiers()
         if imt(tok, i=(Function, Identifier), t=T.Literal):
             # Exactly one parameter.
             return [tok]
     return []
Beispiel #6
0
def group_comparison(tlist):
    """Group comparison expressions (e.g. a = b) into sql.Comparison."""
    comparable_i = (sql.Parenthesis, sql.Function, sql.Identifier)
    comparable_t = T_NUMERICAL + T_STRING + T_NAME

    def is_operand(tk):
        # NULL is a keyword but still a legal comparison operand.
        if imt(tk, t=comparable_t, i=comparable_i):
            return True
        return imt(tk, t=T.Keyword) and tk.value.upper() == 'NULL'

    _group_left_right(tlist, (T.Operator.Comparison, None), sql.Comparison,
                      valid_left=is_operand, valid_right=is_operand)
Beispiel #7
0
 def get_parameters(self):
     """Return a list of parameters."""
     # Parameters are children of the final (parenthesis) token.
     for child in self.tokens[-1].tokens:
         if imt(child, i=IdentifierList):
             return child.get_identifiers()
         if imt(child, i=(Function, Identifier), t=T.Literal):
             return [child]
     return []
Beispiel #8
0
 def valid(token):
     """Accept tokens of the expected kinds, plus the NULL keyword."""
     if imt(token, t=ttypes, i=sqlcls):
         return True
     return bool(token and token.is_keyword
                 and token.normalized == 'NULL')
Beispiel #9
0
 def valid(token):
     """True for matching tokens; the NULL keyword is also allowed."""
     is_match = bool(imt(token, t=ttypes, i=sqlcls))
     is_null = bool(token and token.is_keyword
                    and token.normalized == 'NULL')
     return is_match or is_null
Beispiel #10
0
    def post(tlist, pidx, tidx, nidx):
        """Pick the grouping end index; see issue261 for the next_ check."""
        allowed_i = (sql.SquareBrackets, sql.Function)
        allowed_t = (T.Name, T.String.Symbol, T.Wildcard)
        next_ = None if nidx is None else tlist[nidx]
        if imt(next_, i=allowed_i, t=allowed_t):
            return pidx, nidx
        return pidx, tidx
Beispiel #11
0
    def post(tlist, pidx, tidx, nidx):
        # Validate the trailing token (issue261): only extend the group
        # to nidx when it is a bracket/function or a name-like ttype.
        candidate = tlist[nidx] if nidx is not None else None
        ok = imt(candidate,
                 i=(sql.SquareBrackets, sql.Function),
                 t=(T.Name, T.String.Symbol, T.Wildcard))
        return (pidx, nidx if ok else tidx)
Beispiel #12
0
def group_functions(tlist):
    """Fold a Name token followed by a Parenthesis into a sql.Function."""
    token = tlist.token_next_by(t=T.Name)
    while token:
        following = tlist.token_next(token)
        if imt(following, i=sql.Parenthesis):
            span = tlist.tokens_between(token, following)
            token = tlist.group_tokens(sql.Function, span)
        token = tlist.token_next_by(t=T.Name, idx=token)
Beispiel #13
0
def group_order(tlist):
    """Group together Identifier and Asc/Desc token."""
    token = tlist.token_next_by(t=T.Keyword.Order)
    while token:
        preceding = tlist.token_prev(token)
        if imt(preceding, i=sql.Identifier, t=T.Number):
            span = tlist.tokens_between(preceding, token)
            token = tlist.group_tokens(sql.Identifier, span)
        token = tlist.token_next_by(t=T.Keyword.Order, idx=token)
Beispiel #14
0
def group_order(tlist):
    """Attach a trailing ASC/DESC keyword to the preceding identifier."""
    kw_idx, kw_tok = tlist.token_next_by(t=T.Keyword.Order)
    while kw_tok:
        prev_idx, prev_tok = tlist.token_prev(kw_idx)
        if imt(prev_tok, i=sql.Identifier, t=T.Number):
            # Fold identifier + ordering keyword into one Identifier and
            # resume the scan at the freshly created group.
            tlist.group_tokens(sql.Identifier, prev_idx, kw_idx)
            kw_idx = prev_idx
        kw_idx, kw_tok = tlist.token_next_by(t=T.Keyword.Order, idx=kw_idx)
Beispiel #15
0
def group_arrays(tlist):
    """Merge array subscripts ([...]) with the expression before them."""
    token = tlist.token_next_by(i=sql.SquareBrackets)
    while token:
        preceding = tlist.token_prev(idx=token)
        subscriptable = imt(
            preceding,
            i=(sql.SquareBrackets, sql.Identifier, sql.Function),
            t=(T.Name, T.String.Symbol,))
        if subscriptable:
            span = tlist.tokens_between(preceding, token)
            token = tlist.group_tokens(sql.Identifier, span, extend=True)
        token = tlist.token_next_by(i=sql.SquareBrackets, idx=token)
Beispiel #16
0
def group_comments(tlist):
    """Group runs of comment tokens (plus embedded whitespace) together."""
    def in_run(tk):
        return imt(tk, t=T.Comment) or tk.is_whitespace

    cidx, comment = tlist.token_next_by(t=T.Comment)
    while comment:
        # Find the first token past the comment/whitespace run.
        stop_idx, stop = tlist.token_not_matching(in_run, idx=cidx)
        if stop is not None:
            # Step back one token to the end of the run before grouping.
            stop_idx, stop = tlist.token_prev(stop_idx, skip_ws=False)
            tlist.group_tokens(sql.Comment, cidx, stop_idx)

        cidx, comment = tlist.token_next_by(t=T.Comment, idx=cidx)
Beispiel #17
0
def group_order(tlist):
    """Group together Identifier and Asc/Desc token."""
    order_kw = tlist.token_next_by(t=T.Keyword.Order)
    while order_kw:
        before = tlist.token_prev(order_kw)
        if imt(before, i=sql.Identifier, t=T.Number):
            grouped = tlist.tokens_between(before, order_kw)
            order_kw = tlist.group_tokens(sql.Identifier, grouped)
        order_kw = tlist.token_next_by(t=T.Keyword.Order, idx=order_kw)
Beispiel #18
0
def group_order(tlist):
    """Group together Identifier and Asc/Desc token."""
    idx, tok = tlist.token_next_by(t=T.Keyword.Order)
    while tok:
        left_idx, left = tlist.token_prev(idx)
        if imt(left, i=sql.Identifier, t=T.Number):
            tlist.group_tokens(sql.Identifier, left_idx, idx)
            idx = left_idx  # restart the scan at the new group
        idx, tok = tlist.token_next_by(t=T.Keyword.Order, idx=idx)
Beispiel #19
0
def group_comments(tlist):
    """Merge adjacent comment (and embedded whitespace) tokens."""
    def skippable(tk):
        return imt(tk, t=T.Comment) or tk.is_whitespace

    start_idx, start = tlist.token_next_by(t=T.Comment)
    while start:
        # Locate the first token that ends the run.
        stop_idx, stop = tlist.token_not_matching(skippable, idx=start_idx)
        if stop is not None:
            stop_idx, stop = tlist.token_prev(stop_idx, skip_ws=False)
            tlist.group_tokens(sql.Comment, start_idx, stop_idx)

        start_idx, start = tlist.token_next_by(t=T.Comment, idx=start_idx)
Beispiel #20
0
    def token_next_by_instance(self, idx, clss, end=None):
        """Returns the next token matching a class.

        *idx* is where to start searching in the list of child tokens.
        *clss* is a list of classes the token should be an instance of.

        If no matching token can be found ``None`` is returned.
        """
        def matcher(tk):
            return imt(tk, i=clss)

        return self._token_matching(matcher, idx, end)
Beispiel #21
0
def group_comments(tlist):
    """Gather consecutive comment/whitespace tokens into sql.Comment."""
    def run_member(tk):
        return imt(tk, t=T.Comment) or tk.is_whitespace()

    token = tlist.token_next_by(t=T.Comment)
    while token:
        end = tlist.token_not_matching(token, run_member)
        if end is not None:
            # Back up one token to the true end of the run.
            end = tlist.token_prev(end, False)
            token = tlist.group_tokens(
                sql.Comment, tlist.tokens_between(token, end))

        token = tlist.token_next_by(t=T.Comment, idx=token)
Beispiel #22
0
def _group_matching(tlist, cls):
    """Groups Tokens that have beginning and end. ie. parenthesis, brackets.."""
    # If tlist itself is already such a group, its first token is the
    # opener, so start searching just past it.
    start = 1 if imt(tlist, i=cls) else 0

    token = tlist.token_next_by(m=cls.M_OPEN, idx=start)
    while token:
        closer = find_matching(tlist, token, cls.M_OPEN, cls.M_CLOSE)
        if closer is not None:
            span = tlist.tokens_between(token, closer)
            token = tlist.group_tokens(cls, span)
            # Recurse to catch nested pairs inside the new group.
            _group_matching(token, cls)
        token = tlist.token_next_by(m=cls.M_OPEN, idx=token)
Beispiel #23
0
    def token_next(self, idx, skip_ws=True, skip_cm=False):
        """Returns the next token relative to *idx*.

        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
        ``None`` is returned if there's no next token.
        """
        if isinstance(idx, int):
            # Callers historically pass the current token's index, so
            # advance one slot before searching.
            idx += 1

        def wanted(tk):
            if skip_ws and tk.is_whitespace():
                return False
            if skip_cm and imt(tk, t=T.Comment):
                return False
            return True

        return self._token_matching(wanted, idx)
Beispiel #24
0
def group_aliased(tlist):
    """Merge an aliasable token with an Identifier that directly follows."""
    aliasable = (sql.Parenthesis, sql.Function, sql.Case, sql.Identifier,
                 )  # sql.Operation)

    token = tlist.token_next_by(i=aliasable, t=T.Number)
    while token:
        follower = tlist.token_next(token)
        if imt(follower, i=sql.Identifier):
            span = tlist.tokens_between(token, follower)
            token = tlist.group_tokens(sql.Identifier, span, extend=True)
        token = tlist.token_next_by(i=aliasable, t=T.Number, idx=token)
Beispiel #25
0
def _group_matching(tlist, cls):
    """Groups Tokens that have beginning and end. ie. parenthesis, brackets.."""
    # Skip tlist's own opening token when tlist itself is such a group.
    opener = tlist.token_next_by(
        m=cls.M_OPEN, idx=1 if imt(tlist, i=cls) else 0)
    while opener:
        closer = find_matching(tlist, opener, cls.M_OPEN, cls.M_CLOSE)
        if closer is not None:
            grouped = tlist.group_tokens(
                cls, tlist.tokens_between(opener, closer))
            _group_matching(grouped, cls)  # handle nested pairs
            opener = grouped
        opener = tlist.token_next_by(m=cls.M_OPEN, idx=opener)
Beispiel #26
0
def group_comments(tlist):
    """Combine a run of comment/whitespace tokens into one sql.Comment."""
    def is_run_member(tk):
        return imt(tk, t=T.Comment) or tk.is_whitespace()

    token = tlist.token_next_by(t=T.Comment)
    while token:
        end = tlist.token_not_matching(token, is_run_member)
        if end is not None:
            end = tlist.token_prev(end, False)
            span = tlist.tokens_between(token, end)
            token = tlist.group_tokens(sql.Comment, span)

        token = tlist.token_next_by(t=T.Comment, idx=token)
Beispiel #27
0
    def token_next(self, idx, skip_ws=True, skip_cm=False):
        """Returns the next token relative to *idx*.

        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
        ``None`` is returned if there's no next token.
        """
        if isinstance(idx, int):
            idx += 1  # most call sites pass the current token's index

        def accept(tk):
            skipped = ((skip_ws and tk.is_whitespace())
                       or (skip_cm and imt(tk, t=T.Comment)))
            return not skipped

        return self._token_matching(accept, idx)
Beispiel #28
0
    def token_first(self, ignore_whitespace=True, ignore_comments=False):
        """Returns the first child token.

        If *ignore_whitespace* is ``True`` (the default), whitespace
        tokens are ignored.

        if *ignore_comments* is ``True`` (default: ``False``), comments are
        ignored too.
        """
        def accept(tk):
            if ignore_whitespace and tk.is_whitespace():
                return False
            if ignore_comments and imt(tk, i=Comment):
                return False
            return True

        return self._token_matching(accept)
Beispiel #29
0
    def token_prev(self, idx, skip_ws=True, skip_cm=False):
        """Returns the previous token relative to *idx*.

        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
        ``None`` is returned if there's no previous token.
        """
        if idx is None:
            return None, None
        # Call sites pre-compensate by passing the current token's index.
        idx += 1

        def accept(tk):
            if skip_ws and tk.is_whitespace:
                return False
            if skip_cm and imt(tk, t=T.Comment):
                return False
            return True

        return self._token_matching(accept, idx, reverse=True)
Beispiel #30
0
    def token_first(self, skip_ws=True, skip_cm=False):
        """Returns the first child token.

        If *skip_ws* is ``True`` (the default), whitespace
        tokens are ignored.

        if *skip_cm* is ``True`` (default: ``False``), comments are
        ignored too.
        """
        # Inconsistent with the rest of the class: filters on both the
        # T.Comment ttype and Comment instances.
        def accept(tk):
            if skip_ws and tk.is_whitespace():
                return False
            if skip_cm and imt(tk, t=T.Comment, i=Comment):
                return False
            return True

        return self._token_matching(accept)[1]
Beispiel #31
0
    def token_first(self, skip_ws=True, skip_cm=False):
        """Returns the first child token.

        If *skip_ws* is ``True`` (the default), whitespace
        tokens are ignored.

        if *skip_cm* is ``True`` (default: ``False``), comments are
        ignored too.
        """
        # Inconsistent by design: matches both T.Comment and Comment.
        def accept(tk):
            ws_hit = skip_ws and tk.is_whitespace
            cm_hit = skip_cm and imt(tk, t=T.Comment, i=Comment)
            return not (ws_hit or cm_hit)

        return self._token_matching(accept)[1]
Beispiel #32
0
def group_identifier_list(tlist):
    """Fold comma-separated identifier-like tokens into IdentifierList."""
    allowed_i = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
                 sql.IdentifierList)  # sql.Operation
    allowed_t = (T_NUMERICAL + T_STRING + T_NAME +
                 (T.Keyword, T.Comment, T.Wildcard))

    def list_member(tk):
        return imt(tk, i=allowed_i, m=M_ROLE, t=allowed_t)

    comma = tlist.token_next_by(m=M_COMMA)
    while comma:
        left = tlist.token_prev(comma)
        right = tlist.token_next(comma)

        if list_member(left) and list_member(right):
            span = tlist.tokens_between(left, right)
            comma = tlist.group_tokens(sql.IdentifierList, span, extend=True)
        comma = tlist.token_next_by(m=M_COMMA, idx=comma)
Beispiel #33
0
def group_identifier_list(tlist):
    """Group tokens separated by commas into sql.IdentifierList."""
    instances = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
                 sql.IdentifierList)  # sql.Operation
    kinds = (T_NUMERICAL + T_STRING + T_NAME +
             (T.Keyword, T.Comment, T.Wildcard))

    def groupable(tk):
        return imt(tk, i=instances, m=M_ROLE, t=kinds)

    comma = tlist.token_next_by(m=M_COMMA)
    while comma:
        left = tlist.token_prev(comma)
        right = tlist.token_next(comma)
        # Both neighbours must be identifier-like for the comma to join.
        if groupable(left) and groupable(right):
            comma = tlist.group_tokens(
                sql.IdentifierList,
                tlist.tokens_between(left, right),
                extend=True)
        comma = tlist.token_next_by(m=M_COMMA, idx=comma)
Beispiel #34
0
def group_functions(tlist):
    """Group Name + Parenthesis pairs into sql.Function tokens.

    Skips the whole statement when it contains both CREATE and TABLE,
    where parenthesized column definitions are not function calls.
    """
    seen_create = False
    seen_table = False
    for child in tlist.tokens:
        if child.value == u'CREATE':
            seen_create = True
        if child.value == u'TABLE':
            seen_table = True
    if seen_create and seen_table:
        return

    token = tlist.token_next_by(t=T.Name)
    while token:
        following = tlist.token_next(token)
        if imt(following, i=sql.Parenthesis):
            token = tlist.group_tokens(
                sql.Function, tlist.tokens_between(token, following))
        token = tlist.token_next_by(t=T.Name, idx=token)
Beispiel #35
0
def group_functions(tlist):
    """Turn a Name followed by a Parenthesis into a sql.Function.

    Statements containing both CREATE and TABLE are left untouched so
    that column definitions are not grouped as function calls.
    """
    values = [tok.value for tok in tlist.tokens]
    if 'CREATE' in values and 'TABLE' in values:
        return

    name = tlist.token_next_by(t=T.Name)
    while name:
        after = tlist.token_next(name)
        if imt(after, i=sql.Parenthesis):
            name = tlist.group_tokens(
                sql.Function, tlist.tokens_between(name, after))
        name = tlist.token_next_by(t=T.Name, idx=name)
Beispiel #36
0
def group_operator(tlist):
    """Group simple arithmetic expressions into a single Identifier."""
    operand_i = (sql.SquareBrackets, sql.Parenthesis, sql.Function,
                 sql.Identifier,)  # sql.Operation)
    # wilcards wouldn't have operations next to them
    operand_t = T_NUMERICAL + T_STRING + T_NAME  # + T.Wildcard

    def is_operand(tk):
        return imt(tk, i=operand_i, t=operand_t)

    token = tlist.token_next_by(t=(T.Operator, T.Wildcard))
    while token:
        left = tlist.token_prev(token)
        right = tlist.token_next(token)

        if is_operand(left) and is_operand(right):
            # Normalize the ttype; '*' may have been lexed as a Wildcard.
            token.ttype = T.Operator
            span = tlist.tokens_between(left, right)
            # token = tlist.group_tokens(sql.Operation, tokens)
            token = tlist.group_tokens(sql.Identifier, span)

        token = tlist.token_next_by(t=(T.Operator, T.Wildcard), idx=token)
Beispiel #37
0
def group_functions(tlist):
    """Group Name tokens followed by a Parenthesis into sql.Function.

    Bails out early on CREATE TABLE statements and on typed column
    definitions (VARCHAR2/TIMESTAMP), where parenthesized names are
    not function calls.
    """
    seen_create = False
    for child in tlist.tokens:
        if child.ttype == T.Whitespace:
            continue  # quick skip since there often is lots of whitespace
        if not imt(child, t=(T.Keyword, T.Keyword.DDL, T.Name)):
            continue  # only these ttypes matter for the checks below
        upper_value = child.value.upper()
        if upper_value == 'CREATE':  # T.Keyword.DDL
            seen_create = True
        elif seen_create and upper_value == 'TABLE':  # T.Keyword
            return  # -- create table DDL
        elif upper_value in ('VARCHAR2', 'TIMESTAMP'):
            return

    name_idx, name = tlist.token_next_by(t=T.Name)
    while name:
        paren_idx, paren = tlist.token_next(name_idx)
        if isinstance(paren, sql.Parenthesis):
            tlist.group_tokens(sql.Function, name_idx, paren_idx)
        name_idx, name = tlist.token_next_by(t=T.Name, idx=name_idx)
Beispiel #38
0
 def create(identifier: Identifier):
     """Build a Table from an Identifier, resolving the real name from
     the last dot (so a.b.c yields table c, not b)."""
     # Locate the final "." by scanning backwards from the end.
     dot_idx, _ = identifier._token_matching(
         lambda tok: imt(tok, m=(Punctuation, ".")),
         start=len(identifier.tokens),
         reverse=True,
     )
     real_name = identifier._get_first_name(dot_idx, real_name=True)
     # Everything before the last dot forms the parent (schema) name.
     if dot_idx:
         parent_name = "".join(
             escape_identifier_name(tok.value)
             for tok in identifier.tokens[:dot_idx]
         )
     else:
         parent_name = None
     schema = Schema(parent_name) if parent_name is not None else Schema()
     return Table(real_name, schema)
Beispiel #39
0
def has_table_query(token_list: TokenList) -> bool:
    """
    Return if a statement has a query reading from a table.

        >>> has_table_query(sqlparse.parse("COUNT(*)")[0])
        False
        >>> has_table_query(sqlparse.parse("SELECT * FROM table")[0])
        True

    Note that queries reading from constant values return false:

        >>> has_table_query(sqlparse.parse("SELECT * FROM (SELECT 1)")[0])
        False

    """
    state = InsertRLSState.SCANNING
    for token in token_list.tokens:

        # Recurse into child token lists first.
        if isinstance(token, TokenList) and has_table_query(token):
            return True

        if imt(token, m=[(Keyword, "FROM"), (Keyword, "JOIN")]):
            # Source keyword (FROM/JOIN) spotted.
            state = InsertRLSState.SEEN_SOURCE
        elif state == InsertRLSState.SEEN_SOURCE and (
            isinstance(token, sqlparse.sql.Identifier)
            or token.ttype == Keyword
        ):
            # Identifier/keyword right after FROM/JOIN: reads a table.
            return True
        elif state == InsertRLSState.SEEN_SOURCE and token.ttype != Whitespace:
            # Anything else (other than whitespace) ends the source clause.
            state = InsertRLSState.SCANNING

    return False
Beispiel #40
0
 def valid(token):
     """Return whether *token* matches the configured criteria."""
     # Delegate all instance/match/ttype checks to imt.
     result = imt(token, i=sqlcls, m=m_role, t=ttypes)
     return result
Beispiel #41
0
 def valid(token):
     """Accept matching tokens, plus CURRENT_* datetime keywords."""
     if imt(token, i=sqlcls, t=ttypes):
         return True
     return token.match(
         T.Keyword,
         ('CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP'))
Beispiel #42
0
 def match(token):
     # Operators (and '*' lexed as a wildcard) are grouping candidates.
     kinds = (T.Operator, T.Wildcard)
     return imt(token, t=kinds)
Beispiel #43
0
def group_as(tlist):
    """Build Identifier groups around 'AS' keywords."""
    def ok_left(tk):
        # Keywords can't be aliased, with NULL as the lone exception.
        return tk.value == 'NULL' if imt(tk, t=T.Keyword) else True

    def ok_right(tk):
        # DML/DDL keywords never follow AS in an alias.
        return not imt(tk, t=(T.DML, T.DDL))

    _group_left_right(tlist, (T.Keyword, 'AS'), sql.Identifier,
                      valid_left=ok_left, valid_right=ok_right)
Beispiel #44
0
 def valid_prev(token):
     """The preceding token must be name-like or a bracket/identifier."""
     return imt(token,
                i=(sql.SquareBrackets, sql.Identifier),
                t=(T.Name, T.String.Symbol))
Beispiel #45
0
 def valid(token):
     """Check token against the configured instances, matches, ttypes."""
     is_ok = imt(token, i=sqlcls, m=m_role, t=ttypes)
     return is_ok
Beispiel #46
0
 def valid(token):
     """True when the token matches the expected classes or ttypes."""
     return imt(token, t=ttypes, i=sqlcls)
Beispiel #47
0
 def match(token):
     """Spot operator or wildcard tokens."""
     candidates = (T.Operator, T.Wildcard)
     return imt(token, t=candidates)
Beispiel #48
0
 def token_next_by(self, i=None, m=None, t=None, idx=0, end=None):
     """Find the next child matching the given instance/match/ttype."""
     def predicate(tk):
         return imt(tk, i, m, t)

     return self._token_matching(predicate, idx, end)
Beispiel #49
0
 def valid_next(token):
     """A real token that is not a DML/DDL keyword may follow."""
     disallowed = T.DML, T.DDL
     return not imt(token, t=disallowed) and token is not None