Example #1
    def _process_identifierlist(self, tlist):
        identifiers = list(tlist.get_identifiers())
        if self.indent_columns:
            first = next(identifiers[0].flatten())
            num_offset = 1 if self.char == '\t' else self.width
        else:
            first = next(identifiers.pop(0).flatten())
            num_offset = 1 if self.char == '\t' else self._get_offset(first)

        if not tlist.within(sql.Function):
            with offset(self, num_offset):
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if position > (self.wrap_after - self.offset):
                        adjust = 0
                        if self.comma_first:
                            adjust = -2
                            _, comma = tlist.token_prev(
                                tlist.token_index(token))
                            if comma is None:
                                continue
                            token = comma
                        tlist.insert_before(token, self.nl(offset=adjust))
                        if self.comma_first:
                            _, ws = tlist.token_next(tlist.token_index(token),
                                                     skip_ws=False)
                            if (ws is not None
                                    and ws.ttype is not T.Text.Whitespace):
                                tlist.insert_after(
                                    token, sql.Token(T.Whitespace, ' '))
                        position = 0
        else:
            # ensure whitespace
            for token in tlist:
                _, next_ws = tlist.token_next(tlist.token_index(token),
                                              skip_ws=False)
                if token.value == ',' and not next_ws.is_whitespace:
                    tlist.insert_after(token, sql.Token(T.Whitespace, ' '))

            end_at = self.offset + sum(len(i.value) + 1 for i in identifiers)
            adjusted_offset = 0
            if (self.wrap_after > 0 and end_at >
                (self.wrap_after - self.offset) and self._last_func):
                adjusted_offset = -len(self._last_func.value) - 1

            with offset(self, adjusted_offset), indent(self):
                if adjusted_offset < 0:
                    tlist.insert_before(identifiers[0], self.nl())
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if (self.wrap_after > 0 and position >
                        (self.wrap_after - self.offset)):
                        adjust = 0
                        tlist.insert_before(token, self.nl(offset=adjust))
                        position = 0
        self._process_default(tlist)
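
For context: the _process_identifierlist variants collected here live in sqlparse's reindent filter and are normally reached through sqlparse.format rather than called directly. A minimal usage sketch (reindent, wrap_after and comma_first are documented sqlparse.format options; exact output depends on the installed version):

    import sqlparse

    sql = "select col_a, col_b, col_c, col_d from some_table"
    # Break the column list once a line grows past wrap_after characters.
    print(sqlparse.format(sql, reindent=True, wrap_after=20))
    # Same, but start continuation lines with the comma
    # (the comma_first branch in the code above).
    print(sqlparse.format(sql, reindent=True, wrap_after=20, comma_first=True))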
Example #2
 def _process_identifierlist(self, tlist):
     # columns being selected
     identifiers = list(tlist.get_identifiers())
     t0 = identifiers.pop(0)
     with offset(self, self.get_offset(t0)):
         # insert a newline before each remaining identifier;
         # a plain loop reads better than a side-effecting comprehension
         for token in identifiers:
             tlist.insert_before(token, self.nl(0))
     self._process_default(tlist)
Example #3
 def _process_identifierlist(self, tlist):
     identifiers = list(tlist.get_identifiers())
     first = next(identifiers.pop(0).flatten())
     num_offset = 1 if self.char == '\t' else self._get_offset(first)
     if not tlist.within(sql.Function):
         with offset(self, num_offset):
             position = 0
             for token in identifiers:
                 # Add 1 for the "," separator
                 position += len(token.value) + 1
                 if position > (self.wrap_after - self.offset):
                     adjust = 0
                     if self.comma_first:
                         adjust = -2
                         _, comma = tlist.token_prev(
                             tlist.token_index(token))
                         if comma is None:
                             continue
                         token = comma
                     tlist.insert_before(token, self.nl(offset=adjust))
                     if self.comma_first:
                         _, ws = tlist.token_next(
                             tlist.token_index(token), skip_ws=False)
                         if (ws is not None
                                 and ws.ttype is not T.Text.Whitespace):
                             tlist.insert_after(
                                 token, sql.Token(T.Whitespace, ' '))
                     position = 0
     self._process_default(tlist)
Example #4
    def _process_parenthesis(self, tlist):
        is_DML_DLL = tlist.token_next_by(t=(T.Keyword.DML, T.Keyword.DDL))
        first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)

        with indent(self, 1 if is_DML_DLL else 0):
            if is_DML_DLL:
                tlist.tokens.insert(0, self.nl())
            with offset(self, self._get_offset(first) + 1):
                self._process_default(tlist, not is_DML_DLL)
Example #5
    def _process_case(self, tlist):
        iterable = iter(tlist.get_cases())
        cond, _ = next(iterable)
        first = next(cond[0].flatten())

        with offset(self, self._get_offset(tlist[0])):
            with offset(self, self._get_offset(first)):
                for cond, value in iterable:
                    token = value[0] if cond is None else cond[0]
                    tlist.insert_before(token, self.nl())

                # Line breaks on group level are done. Let's add an offset
                # equal to the length of "WHEN ", "THEN ", or "ELSE ".
                with offset(self, len("WHEN ")):
                    self._process_default(tlist)
            end_idx, end = tlist.token_next_by(m=sql.Case.M_CLOSE)
            tlist.insert_before(end_idx, self.nl())
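
_process_case inserts a line break before each WHEN/ELSE branch and before the closing END, then shifts the branch bodies right by len("WHEN "). A quick way to exercise it (a sketch; the exact layout differs between sqlparse versions):

    import sqlparse

    sql = ("select case when a = 1 then 'one' "
           "when a = 2 then 'two' else 'many' end from t")
    print(sqlparse.format(sql, reindent=True))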
Example #6
 def _process_default(self, tlist):
     self._split_kwds(tlist)
     # process any sub-sub statements
     for sgroup in tlist.get_sublists():
         prev = tlist.token_prev(sgroup)
         # HACK: make "group/order by" work. Longer than max_len.
         offset_ = 3 if (prev and prev.match(T.Keyword, 'BY')) else 0
         with offset(self, offset_):
             self._process(sgroup)
Example #7
    def _process_parenthesis(self, tlist):
        ttypes = T.Keyword.DML, T.Keyword.DDL
        _, is_dml_dll = tlist.token_next_by(t=ttypes)
        fidx, first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)

        with indent(self, 1 if is_dml_dll else 0):
            if is_dml_dll:
                tlist.tokens.insert(0, self.nl())
            with offset(self, self._get_offset(first) + 1):
                self._process_default(tlist, not is_dml_dll)
Example #8
 def _process_default(self, tlist):
     self._split_kwds(tlist)
     # process any sub-sub statements
     for sgroup in tlist.get_sublists():
         idx = tlist.token_index(sgroup)
         pidx, prev_ = tlist.token_prev(idx)
         # HACK: make "group/order by" work. Longer than max_len.
         offset_ = 3 if (prev_ and prev_.match(T.Keyword, "BY")) else 0
         with offset(self, offset_):
             self._process(sgroup)
Example #9
 def _process_default(self, tlist):
     self._split_kwds(tlist)
     # process any sub-sub statements
     for sgroup in tlist.get_sublists():
         idx = tlist.token_index(sgroup)
         pidx, prev_ = tlist.token_prev(idx)
         # HACK: make "group/order by" work. Longer than max_len.
         offset_ = 3 if (prev_ and prev_.match(
             T.Keyword, self.by_words, regex=True)) else 0
         with offset(self, offset_):
             self._process(sgroup)
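
The offset_ = 3 hack in the two _process_default variants above compensates for GROUP BY and ORDER BY being three characters longer than the other split keywords, keeping wrapped column lists aligned. A sketch that should reach that code path (output shape is version dependent):

    import sqlparse

    sql = "select a, b, count(*) from t group by a, b order by a, b"
    # A small wrap_after forces the identifier lists after BY to wrap,
    # which is where the extra 3-character offset matters.
    print(sqlparse.format(sql, reindent=True, wrap_after=10))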
Example #10
    def _process_parenthesis(self, tlist):
        ttypes = T.Keyword.DML, T.Keyword.DDL
        ttyper = T.Name.Builtin
        _, is_dml_dll = tlist.token_next_by(t=ttypes)
        _, is_builtin = tlist.token_next_by(t=ttyper)
        fidx, first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)

        with indent(self, 1 if is_dml_dll else 0):
            if is_dml_dll:
                tlist.tokens.insert(0, self.nl())
            with offset(self, self._get_offset(first) + 1):
                self._process_default(tlist, not is_dml_dll)
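
All of the _process_parenthesis variants only force a leading newline and an extra indent level when the parenthesis actually contains DML/DDL, i.e. a subquery; plain grouping parentheses keep their inline offset. A sketch:

    import sqlparse

    # The inner SELECT gets its own indented block; the arithmetic
    # parentheses in the WHERE clause are left inline.
    sql = "select * from (select x, y from t where (x + y) > 0) sub"
    print(sqlparse.format(sql, reindent=True))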
Example #11
 def _process_identifierlist(self, tlist):
     identifiers = list(tlist.get_identifiers())
     first = next(identifiers.pop(0).flatten())
     num_offset = 1 if self.char == '\t' else self._get_offset(first)
     if not tlist.within(sql.Function):
         with offset(self, num_offset):
             position = 0
             for token in identifiers:
                 # Add 1 for the "," separator
                 position += len(token.value) + 1
                 if position > (self.wrap_after - self.offset):
                     tlist.insert_before(token, self.nl())
                     position = 0
     self._process_default(tlist)
Example #12
    def _process_parenthesis(self, tlist):
        # if this isn't a subquery, don't re-indent
        _, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
        if token is not None:
            with indent(self):
                tlist.insert_after(tlist[0], self.nl('SELECT'))
                # process the inside of the parentheses
                self._process_default(tlist)

            # de-indent last parenthesis
            tlist.insert_before(tlist[-1], self.nl())
        else:
            with offset(self, -1):
                self._process_default(tlist)
Example #13
    def _process_default(self, tlist):
        tidx_offset = 0
        prev_kw = None  # previous keyword match
        prev_tk = None  # previous token
        for idx, token in enumerate(list(tlist)):
            tidx = idx + tidx_offset

            if token.is_whitespace:
                continue

            if token.is_group:
                # HACK: make "group/order by" work. Longer than max_len.
                offset_ = 3 if (prev_tk and prev_tk.normalized == 'BY') else 0
                with offset(self, offset_):
                    self._process(token)

            if not token.match(T.Keyword, self.split_words, regex=True):
                prev_tk = token
                continue

            if token.normalized == 'BETWEEN':
                prev_kw = token
                continue

            if token.normalized == 'AND' and prev_kw is not None and (
                    prev_kw.normalized == 'BETWEEN'):
                prev_kw = token
                continue

            if token.match(T.Keyword, self.join_words, regex=True):
                token_indent = token.value.split()[0]
            else:
                token_indent = text_type(token)

            tlist.insert_before(tidx, self.nl(token_indent))
            tidx_offset += 1

            prev_kw = prev_tk = token
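
Note how prev_kw tracks BETWEEN so that the AND which closes it is not split onto a new line, while any later AND still is. Roughly observable as follows (a sketch, not exact output):

    import sqlparse

    sql = "select * from t where a between 1 and 10 and b = 2"
    # "between 1 and 10" stays on one line; the second AND starts a new one.
    print(sqlparse.format(sql, reindent=True))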
Example #14
    def _process_identifierlist(self, tlist):
        identifiers = list(tlist.get_identifiers())
        if self.indent_columns:
            first = next(identifiers[0].flatten())
            num_offset = 1 if self.char == '\t' else self.width
        else:
            first = next(identifiers.pop(0).flatten())
            num_offset = 1 if self.char == '\t' else self._get_offset(first)

        if not tlist.within(sql.Function):
            with offset(self, num_offset):
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if position > (self.wrap_after - self.offset):
                        adjust = 0
                        if self.comma_first:
                            adjust = -2
                            _, comma = tlist.token_prev(
                                tlist.token_index(token))
                            if comma is None:
                                continue
                            token = comma
                        tlist.insert_before(token, self.nl(offset=adjust))
                        if self.comma_first:
                            _, ws = tlist.token_next(
                                tlist.token_index(token), skip_ws=False)
                            if (ws is not None
                                    and ws.ttype is not T.Text.Whitespace):
                                tlist.insert_after(
                                    token, sql.Token(T.Whitespace, ' '))
                        position = 0
        else:
            # ensure whitespace
            for token in tlist:
                _, next_ws = tlist.token_next(
                    tlist.token_index(token), skip_ws=False)
                if token.value == ',' and not next_ws.is_whitespace:
                    tlist.insert_after(
                        token, sql.Token(T.Whitespace, ' '))

            end_at = self.offset + sum(len(i.value) + 1 for i in identifiers)
            adjusted_offset = 0
            if (self.wrap_after > 0
                    and end_at > (self.wrap_after - self.offset)
                    and self._last_func):
                adjusted_offset = -len(self._last_func.value) - 1

            with offset(self, adjusted_offset), indent(self):
                if adjusted_offset < 0:
                    tlist.insert_before(identifiers[0], self.nl())
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if (self.wrap_after > 0
                            and position > (self.wrap_after - self.offset)):
                        adjust = 0
                        tlist.insert_before(token, self.nl(offset=adjust))
                        position = 0
        self._process_default(tlist)
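
Finally, the indent_columns branch above switches from aligning wrapped columns under the first identifier to indenting every column by one level. A sketch, assuming a sqlparse version recent enough to accept the indent_columns option:

    import sqlparse

    sql = "select col_a, col_b, col_c from some_table"
    # Each selected column goes on its own indented line instead of
    # being aligned under col_a.
    print(sqlparse.format(sql, reindent=True, indent_columns=True))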