Code example #1
File: create.py Project: tipresias/tipresias
import typing

# Assumed module-level aliases (this project imports sqlparse.sql and
# sqlparse.tokens under these names):
from sqlparse import sql as token_groups
from sqlparse import tokens as token_types


def _split_column_identifiers_by_comma(
    column_identifiers: token_groups.IdentifierList,
) -> typing.List[token_groups.TokenList]:
    # Flatten the identifier list into a flat sequence of leaf tokens.
    column_tokens = list(column_identifiers.flatten())
    column_token_list = token_groups.TokenList(column_tokens)
    # Collect the index of every comma; None marks the open start and end.
    comma_idxs: typing.List[typing.Optional[int]] = [None]
    comma_idx = -1

    while comma_idx is not None:
        comma_idx, _ = column_token_list.token_next_by(
            m=(token_types.Punctuation, ","), idx=comma_idx
        )
        comma_idxs.append(comma_idx)

    # Pair consecutive comma positions into (start, stop) ranges.
    column_group_ranges = [
        (comma_idxs[idx], comma_idxs[idx + 1])
        for idx in range(len(comma_idxs) - 1)
    ]

    # Slice out each column's tokens, skipping the comma itself.
    return [
        token_groups.TokenList(
            column_tokens[(start if start is None else start + 1):stop]
        )
        for start, stop in column_group_ranges
    ]
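A quick usage sketch (my own, not from the project) may help: sqlparse groups a bare comma-separated list into a single IdentifierList, which can be passed straight to the helper above.

import sqlparse

# "a, b, c" parses into one IdentifierList at the top of the statement.
identifier_list = sqlparse.parse("a, b, c")[0].tokens[0]
groups = _split_column_identifiers_by_comma(identifier_list)
print([str(group) for group in groups])  # ['a', ' b', ' c']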
Code example #2
import sqlparse
from sqlparse import sql
from sqlparse import tokens as ttypes


def parse_out_editable_pieces(file_name):
    # file_output is a project helper (not shown here) that returns the
    # file's SQL text as a string.
    sql_txt = file_output(file_name)
    view_create_commands = [cv for cv in sqlparse.split(sql_txt)
                            if sqlparse.parse(cv)[0].token_first(skip_ws=True, skip_cm=True)
                            .match(ttypes.Keyword.DDL, 'CREATE OR REPLACE')]

    for create in view_create_commands:
        parsed_create = sqlparse.parse(create)[0]
        create_tokens = [t for t in sql.TokenList(parsed_create.tokens)
                         if t.ttype not in (ttypes.Whitespace, ttypes.Whitespace.Newline)]
        create_token_list = sql.TokenList(create_tokens)
        create_union_indexes = []

        # TODO: Find start of Unions
        for index, token in enumerate(create_token_list):
            # Only find SELECT first then UNION ALL
            match_text = 'SELECT' if len(create_union_indexes) == 0 else 'UNION ALL'
            target_type = ttypes.Keyword.DML if len(create_union_indexes) == 0 else ttypes.Keyword

            if token.match(target_type, match_text):
                create_union_indexes.append(index)

        print(create_union_indexes)

        # TODO: group unions into statements
        first_union = create_union_indexes[0]
        union_count = len(create_union_indexes)
        create_union_indexes.reverse()

        for index, union_index in enumerate(create_union_indexes):
            # Find the column declarations
            end = len(create_token_list.tokens)-1 if index == 0 else create_union_indexes[index-1]
            create_token_list.group_tokens(sql.Statement, start=union_index, end=end, include_end=False)
            # token_list.token_next_by(idx=union_location, t=[[sql.IdentifierList]], end=select_locations[(index + 1)])

        # TODO: Iterate through created union statements to find each key
        # Each grouped union sits at a consecutive index starting at first_union.
        for tk_index in range(first_union, first_union + union_count):
            # TODO: grab table name for mapping to update string
            union = create_token_list[tk_index]
            found_key = False
            for line in union:
                # TODO: Identify the list of column names
                if isinstance(line, sql.IdentifierList):
                    # column_list = [t for t in sql.TokenList(token)
                    #                if t.ttype not in (ttypes.Whitespace, ttypes.Whitespace.Newline)]
                    for identifier in line:
                        # TODO: filter down to key
                        if hasattr(identifier, 'tokens'):
                            # Remove comments because they lump into the end of an identifier when split
                            _stripped_values = [t.value for t in identifier.tokens if not isinstance(t, sql.Comment)]
                            if isinstance(identifier, sql.Identifier) and 'channelmix_key' in _stripped_values:
                                found_key = True
                                print(f"Union {tk_index} channelmix key in identifier: {identifier}")
            if not found_key:
                print(f'Key not found for union {tk_index}')
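The boundary-finding idea above can be checked with a minimal standalone sketch (assuming a flat two-branch statement with no nesting):

import sqlparse
from sqlparse import sql
from sqlparse import tokens as ttypes

text = "SELECT a FROM t1 UNION ALL SELECT b FROM t2"
parsed = sqlparse.parse(text)[0]

# Mirror the loop above: the first boundary is the opening SELECT,
# every later one is a UNION ALL keyword.
starts = []
for i, tok in enumerate(parsed.tokens):
    if not starts and tok.match(ttypes.Keyword.DML, 'SELECT'):
        starts.append(i)
    elif starts and tok.match(ttypes.Keyword, 'UNION ALL'):
        starts.append(i)

# Slice the token stream at each boundary.
bounds = starts + [len(parsed.tokens)]
branches = [sql.TokenList(parsed.tokens[s:e]) for s, e in zip(bounds, bounds[1:])]
print([str(b).strip() for b in branches])
# ['SELECT a FROM t1', 'UNION ALL SELECT b FROM t2']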
Code example #3
File: filters.py Project: edric-shen/sqlparse
    def _split_tokens_by_comma(self, tokens):
        split_token_lists = []

        start = 0
        for index, token in enumerate(tokens):
            if token.match(T.Punctuation, ','):
                split_token_lists.append(sql.TokenList(tokens[start:index]))
                start = index + 1
        if tokens[start:]:
            split_token_lists.append(sql.TokenList(tokens[start:]))

        return split_token_lists
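Treating the method above as a plain function (self is never used), a short demo of the same splitting on a flattened statement:

import sqlparse

flat = list(sqlparse.parse("a, b, c")[0].flatten())
parts = _split_tokens_by_comma(None, flat)  # None stands in for self here
print([str(part) for part in parts])  # ['a', ' b', ' c']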
Code example #4
File: test_tokenize.py Project: vmuriart/sqlparse
def test_tokenlist_token_matching():
    t1 = sql.Token(T.Keyword, 'foo')
    t2 = sql.Token(T.Punctuation, ',')
    x = sql.TokenList([t1, t2])
    assert x.token_matching([lambda t: t.ttype is T.Keyword], 0) == t1
    assert x.token_matching([lambda t: t.ttype is T.Punctuation], 0) == t2
    assert x.token_matching([lambda t: t.ttype is T.Keyword], 1) is None
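The same API on a full parsed statement (a small sketch of my own): token_matching scans the token list from idx and returns the first token any predicate accepts. Note that 'select' carries the Keyword.DML subtype, so an identity check against plain Keyword skips it.

import sqlparse
from sqlparse import tokens as T

stmt = sqlparse.parse('select foo from bar')[0]
kw = stmt.token_matching([lambda t: t.ttype is T.Keyword], 0)
print(kw.value)  # 'from' -- 'select' is Keyword.DML, not plain Keyword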
Code example #5
File: aligned_indent.py Project: ayanmadaan/ecomm
    def _process_statement(self, tlist):
        if len(tlist.tokens) > 0 and tlist.tokens[0].is_whitespace \
                and self.indent == 0:
            tlist.tokens.pop(0)

        # process the main query body
        self._process(sql.TokenList(tlist.tokens))
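This filter is not usually instantiated directly; it is enabled through sqlparse.format's reindent_aligned option (a quick illustration):

import sqlparse

print(sqlparse.format(
    'select a, b from t where a = 1 and b = 2',
    reindent_aligned=True,
))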
Code example #6
def test_issue212_py2unicode():
    if sys.version_info < (3, ):
        t1 = sql.Token(T.String, u"schöner ")
    else:
        t1 = sql.Token(T.String, "schöner ")
    t2 = sql.Token(T.String, u"bug")
    l = sql.TokenList([t1, t2])
    assert str(l) == 'schöner bug'
Code example #7
 def test_token_matching(self):
     t1 = sql.Token(Keyword, 'foo')
     t2 = sql.Token(Punctuation, ',')
     x = sql.TokenList([t1, t2])
     self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
                      t1)
     self.assertEqual(
         x.token_matching(0, [lambda t: t.ttype is Punctuation]), t2)
     self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
                      None)
Code example #8
File: test_tokenize.py Project: vmuriart/sqlparse
def test_tokenlist_first():
    p = sqlparse.parse(' select foo')[0]
    first = p.token_first()
    assert first.value == 'select'
    assert p.token_first(skip_ws=False).value == ' '
    assert sql.TokenList([]).token_first() is None
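Code example #2 above also relies on token_first's skip_cm flag, which additionally skips leading comments (a brief sketch):

import sqlparse

stmt = sqlparse.parse('/* hint */ select foo')[0]
print(stmt.token_first(skip_ws=True, skip_cm=True).value)  # 'select'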
Code example #9
File: test_regressions.py Project: wind39/sqlparse
def test_issue212_py2unicode():
    t1 = sql.Token(T.String, u'schöner ')
    t2 = sql.Token(T.String, 'bug')
    token_list = sql.TokenList([t1, t2])
    assert str(token_list) == 'schöner bug'
Code example #10
File: test_tokenize.py Project: JasonMWhite/sqlparse
 def test_token_first(self):
     p = sqlparse.parse(' select foo')[0]
     first = p.token_first()
     self.assertEqual(first.value, 'select')
     self.assertEqual(p.token_first(skip_ws=False).value, ' ')
     self.assertEqual(sql.TokenList([]).token_first(), None)