Example #1
    def run(self, sql, encoding=None):
        stream = lexer.tokenize(sql, encoding)
        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)

        if (self.stmtprocess or self.postprocess or self.split_statements
                or self._grouping):
            splitter = StatementFilter()
            stream = splitter.process(self, stream)

        # Import StripCommentsFilter in the run() method to avoid a circular
        # dependency. For stripping comments, the only grouping method we want
        # to invoke is grouping.group_comments(), which considerably improves
        # performance.
        strip_comments_only = False
        if self.stmtprocess and len(self.stmtprocess) == 1:
            from sqlparse.filters import StripCommentsFilter
            strip_comments_only = isinstance(self.stmtprocess[0],
                                             StripCommentsFilter)

        if self._grouping:

            def _group(stream):
                for stmt in stream:
                    if strip_comments_only:
                        grouping.group_comments(stmt)
                    else:
                        grouping.group(stmt)
                    yield stmt

            stream = _group(stream)

        if self.stmtprocess:

            def _run1(stream):
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret

            stream = _run1(stream)

        if self.postprocess:

            def _run2(stream):
                for stmt in stream:
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt

            stream = _run2(stream)

        return stream
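
For context, here is a minimal sketch of how this run() method is typically driven, assuming the sqlparse 0.1.x-era FilterStack API (the preprocess/stmtprocess/postprocess hook lists and enable_grouping() seen above); attribute names may differ in other versions:

    from sqlparse import engine, filters

    # Filters appended to the three hook lists are applied by run()
    # in the same order the method above processes them.
    stack = engine.FilterStack()
    stack.enable_grouping()                                  # sets self._grouping
    stack.stmtprocess.append(filters.StripCommentsFilter())  # per-statement pass
    stack.postprocess.append(filters.SerializerUnicode())    # serialize statements
    for stmt in stack.run('SELECT 1; -- one\nSELECT 2;'):
        print(stmt)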
Example #2
from sqlparse import lexer
from sqlparse.engine.filter import StatementFilter


def split(sql, encoding=None):
    """Split *sql* into single statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A list of strings.
    """
    stream = lexer.tokenize(sql, encoding)
    splitter = StatementFilter()
    stream = splitter.process(None, stream)
    # Note: ``unicode`` is Python 2; on Python 3 this would be ``str(stmt).strip()``.
    return [unicode(stmt).strip() for stmt in stream]
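
This function is exposed at the package level as sqlparse.split(), so the whole pipeline above can be exercised in one call:

    import sqlparse

    sqlparse.split('select * from foo; select 1;')
    # returns ['select * from foo;', 'select 1;']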
Example #3
File: __init__.py Project: 1ack/Impala
    def run(self, sql, encoding=None):
        stream = lexer.tokenize(sql, encoding)
        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)

        if (self.stmtprocess or self.postprocess or self.split_statements
                or self._grouping):
            splitter = StatementFilter()
            stream = splitter.process(self, stream)

        # Import StripCommentsFilter in the run() method to avoid a circular
        # dependency. For stripping comments, the only grouping method we want
        # to invoke is grouping.group_comments(), which considerably improves
        # performance.
        strip_comments_only = False
        if self.stmtprocess and len(self.stmtprocess) == 1:
            from sqlparse.filters import StripCommentsFilter
            strip_comments_only = isinstance(self.stmtprocess[0],
                                             StripCommentsFilter)

        if self._grouping:
            def _group(stream):
                for stmt in stream:
                    if strip_comments_only:
                        grouping.group_comments(stmt)
                    else:
                        grouping.group(stmt)
                    yield stmt
            stream = _group(stream)

        if self.stmtprocess:
            def _run1(stream):
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret
            stream = _run1(stream)

        if self.postprocess:

            def _run2(stream):
                for stmt in stream:
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt
            stream = _run2(stream)

        return stream
Example #4
    def run(self, sql):
        stream = lexer.tokenize(sql)

        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)

        if (self.stmtprocess or self.postprocess or self.split_statements
                or self._grouping):
            splitter = StatementFilter()
            stream = splitter.process(self, stream)

        if self._grouping:

            def _group(stream):
                for stmt in stream:
                    grouping.group(stmt)
                    yield stmt

            stream = _group(stream)

        if self.stmtprocess:

            def _run1(stream):
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret

            stream = _run1(stream)

        if self.postprocess:

            def _run2(stream):
                for stmt in stream:
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt

            stream = _run2(stream)

        return stream
Example #5
    def run(self, sql):
        stream = lexer.tokenize(sql)
        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)

        if (self.stmtprocess or self.postprocess or self.split_statements
                or self._grouping):
            splitter = StatementFilter()
            stream = splitter.process(self, stream)

        if self._grouping:

            def _group(stream):
                for stmt in stream:
                    grouping.group(stmt)
                    yield stmt
            stream = _group(stream)

        if self.stmtprocess:

            def _run1(stream):
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret
            stream = _run1(stream)

        if self.postprocess:

            def _run2(stream):
                for stmt in stream:
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt
            stream = _run2(stream)

        return stream
Example #6
def split2(stream):
    splitter = StatementFilter()
    return list(splitter.process(None, stream))
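
Unlike split(), split2() takes an already-tokenized stream and returns a list of Statement token objects rather than plain strings. A minimal usage sketch (this assumes StatementFilter is importable, as Example #8 below shows):

    from sqlparse import lexer

    stream = lexer.tokenize('select 1; select 2;')
    for stmt in split2(stream):
        # each stmt is a sqlparse.sql.Statement token group
        print(str(stmt).strip())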
Example #7
File: __init__.py Project: CometHale/lphw
def split2(stream):
    splitter = StatementFilter()
    return list(splitter.process(None, stream))
Example #8
def split2(stream):
    from sqlparse.engine.filter import StatementFilter
    splitter = StatementFilter()
    return list(splitter.process(None, stream))