Example #1
    def apply(self, element):
        form = element.parent

        if is_identifier(element, '=') and \
                not ( form.first.next is element or
                      is_token(form.first.next, Tokens.PUNCTUATION, ',')):
            return

        form.wrap(form.first, element.prev, PreSeq)
        if element.next is not None:
            first_indent = element.next
            while first_indent is not None and not is_token(first_indent, Tokens.INDENT):
                first_indent = first_indent.next
            if first_indent is not None:
                if is_token(first_indent.prev, Tokens.PUNCTUATION, ':'):
                    form.wrap(element.next, form.last, PreForm)
                else:
                    form.replace(first_indent, Tokens.ARGBREAK())
                    form.wrap(element.next, form.last, PreSeq)
        form.remove(element)
        if isinstance(form, PreForm):
            form.prepend(Identifier("="))
        else:
            new_form = form.wrap(form.first, form.last, Form).code
            new_form.prepend(element)
        return None
Example #2
    def apply(self, element) -> Element:
        assert is_token(element.next, Tokens.COMMENT)
        if element.next.next is element.end:
            parent = element.parent
            next_element = element.end.next
            parent.remove(element.next)
            parent.remove(element)
            parent.remove(element.end)
            return next_element
        else:
            parent = element.parent
            """:type : Node"""
            new_form_element = parent.wrap(element, element.end, Form)
            new_form = new_form_element.code
            new_form.prepend(Identifier("debug-values", element.range))
            # str BEGIN_MACRO('#') seq of STRING tokens, interspersed with Identifiers and BEGIN_MACRO / END_MACRO pairs END_MACRO

            new_form.remove(element) # remove BEGIN_MACRO('#')
            new_form.remove(element.end)  # remove END_MACRO

            elm = new_form[1] # first element
            while elm is not None:
                if is_token(elm, Tokens.BEGIN_MACRO):
                    elm = elm.end.next
                elif is_token(elm, Tokens.COMMENT):
                    nxt = elm.next
                    new_form.remove(elm)
                    elm = nxt
                else:
                    elm = elm.next

            return new_form_element.next
Example #3
    def apply(self, element) -> Element:
        assert is_token(element.next, Tokens.COMMENT)
        if element.next.next is element.end:
            parent = element.parent
            next_element = element.end.next
            parent.remove(element.next)
            parent.remove(element)
            parent.remove(element.end)
            return next_element
        else:
            parent = element.parent
            """:type : Node"""
            new_form_element = parent.wrap(element, element.end, Form)
            new_form = new_form_element.code
            new_form.prepend(Identifier("debug-values", element.range))
            # str BEGIN_MACRO('#') seq of STRING tokens, interspersed with Identifiers and BEGIN_MACRO / END_MACRO pairs END_MACRO

            new_form.remove(element)  # remove BEGIN_MACRO('#')
            new_form.remove(element.end)  # remove END_MACRO

            elm = new_form[1]  # first element
            while elm is not None:
                if is_token(elm, Tokens.BEGIN_MACRO):
                    elm = elm.end.next
                elif is_token(elm, Tokens.COMMENT):
                    nxt = elm.next
                    new_form.remove(elm)
                    elm = nxt
                else:
                    elm = elm.next

            return new_form_element.next
Example #4
    def apply(self, element):
        form = element.parent

        if is_identifier(element, '=') and \
                not ( form.first.next is element or
                      is_token(form.first.next, Tokens.PUNCTUATION, ',')):
            return

        form.wrap(form.first, element.prev, PreSeq)
        if element.next is not None:
            first_indent = element.next
            while first_indent is not None and not is_token(
                    first_indent, Tokens.INDENT):
                first_indent = first_indent.next
            if first_indent is not None:
                if is_token(first_indent.prev, Tokens.PUNCTUATION, ':'):
                    form.wrap(element.next, form.last, PreForm)
                else:
                    form.replace(first_indent, Tokens.ARGBREAK())
                    form.wrap(element.next, form.last, PreSeq)
        form.remove(element)
        if isinstance(form, PreForm):
            form.prepend(Identifier("="))
        else:
            new_form = form.wrap(form.first, form.last, Form).code
            new_form.prepend(element)
        return None
Example #5
def _element_after(element) -> Element:
    """
    Skips ``BEGIN_MACRO`` / ``END_MACRO`` and ``BEGIN`` / ``END`` pairs.
    """
    if is_token(element, Tokens.BEGIN_MACRO) or is_token(element, Tokens.BEGIN):
        return element.end.next
    else:
        return element.next
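The following is a minimal, self-contained sketch of the skip behaviour described above. The MockToken class, the link helper and element_after_mock are hypothetical stand-ins written only for illustration; they are not the real anoky Element/Token API.

class MockToken:
    """Hypothetical stand-in for a token in the chain; only the fields used here."""
    def __init__(self, kind):
        self.kind = kind
        self.next = None
        self.end = None   # for opening tokens: the matching closing token

def link(tokens):
    # chain the tokens together via .next and return them
    for a, b in zip(tokens, tokens[1:]):
        a.next = b
    return tokens

def element_after_mock(element):
    # same shape as _element_after above, written against the mock kinds
    if element.kind in ("BEGIN_MACRO", "BEGIN"):
        return element.end.next   # jump over the whole delimited pair
    return element.next

# BEGIN a b END c
begin, a, b, end, c = link([MockToken(k) for k in
                            ("BEGIN", "CONSTITUENT", "CONSTITUENT", "END", "CONSTITUENT")])
begin.end = end
assert element_after_mock(begin) is c   # the BEGIN ... END pair is skipped as a unit
assert element_after_mock(a) is b       # ordinary elements just advance to .next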
Example #6
def _element_after(element) -> Element:
    """
    Skips ``BEGIN_MACRO`` / ``END_MACRO`` and ``BEGIN`` / ``END`` pairs.
    """
    if is_token(element, Tokens.BEGIN_MACRO) or is_token(
            element, Tokens.BEGIN):
        return element.end.next
    else:
        return element.next
Example #7
    def apply(self, element: Element):
        form = element.code
        elm = form[1]
        in_elm = None
        colon_elm = None
        ind_elm = None
        while elm is not None:
            if is_identifier(elm, 'in'):
                in_elm = elm
            elif is_token(elm, PUNCTUATION, ':'):
                colon_elm = elm
                break
            elm = elm.next

        if colon_elm is None or in_elm is None:
            raise ArrangementError(element.range, "`for` form must be punctuated by `in` and `:`.")
        if in_elm.prev is form[0]:
            raise ArrangementError(element.range, "No element before `in` in `for` form.")
        if in_elm.next is colon_elm:
            raise ArrangementError(element.range, "No element between `in` and `:` in `for` form.")

        form.wrap(form[1], in_elm.prev, PreSeq)
        form.wrap(in_elm.next, colon_elm.prev, PreSeq)
        form.remove(in_elm)

        return DefaultPunctuation.apply(self, element)
Example #8
def has_unique_block(opening_delimiter_element):
    """
    Returns ``True`` if the given ``BEGIN_MACRO`` token is of the form::

       BEGIN_MACRO BEGIN  END END_MACRO

    (so there is a single ``BEGIN`` / ``END`` pair between the given ``BEGIN_MACRO`` and ``END_MACRO`` pair)
    """
    closing_delimiter_element = opening_delimiter_element.end
    begin_element = opening_delimiter_element.next
    assert is_token(begin_element, Tokens.BEGIN)
    assert begin_element.end is not None
    end_element = begin_element.end
    assert end_element.next is closing_delimiter_element or is_token(end_element.next, Tokens.BEGIN)

    return end_element.next is closing_delimiter_element
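As a rough illustration of the shape this predicate tests for, here is a self-contained sketch with mock tokens (the Tok class, chain helper and has_unique_block_mock below are invented for this example and are not the real anoky token classes): the result is True exactly when a single BEGIN/END pair sits between the BEGIN_MACRO and its END_MACRO.

class Tok:
    def __init__(self, kind):
        self.kind = kind
        self.next = None
        self.end = None   # opening tokens point at their matching closing token

def chain(*toks):
    # link the tokens together via .next and return them
    for a, b in zip(toks, toks[1:]):
        a.next = b
    return toks

def has_unique_block_mock(opening):
    begin = opening.next                  # first BEGIN inside the macro
    return begin.end.next is opening.end  # its END is directly followed by END_MACRO

# BEGIN_MACRO BEGIN END END_MACRO  ->  a single block
bm, b1, e1, em = chain(Tok("BEGIN_MACRO"), Tok("BEGIN"), Tok("END"), Tok("END_MACRO"))
bm.end, b1.end = em, e1
assert has_unique_block_mock(bm) is True

# BEGIN_MACRO BEGIN END BEGIN END END_MACRO  ->  two blocks
bm, b1, e1, b2, e2, em = chain(Tok("BEGIN_MACRO"), Tok("BEGIN"), Tok("END"),
                               Tok("BEGIN"), Tok("END"), Tok("END_MACRO"))
bm.end, b1.end, b2.end = em, e1, e2
assert has_unique_block_mock(bm) is False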
Example #9
def is_opening_delimiter(element: Element, opening_delimiter_str: str):
    """
    Returns ``True`` if the given element is a ``BEGIN_MACRO`` token with ``text == opening_delimiter_str``.
    """
    return is_token(element,
                    Tokens.BEGIN_MACRO,
                    token_text=opening_delimiter_str)
 def applies(self, element:Element):
     return (
         not element.is_first() and
         is_token(element, Tokens.CONSTITUENT) and
         identifier_in(element.value, self.sym_vals) and
         is_not_none(element.prev, ".code.range.position_after.index", element, ".range.first_position.index") and
         element.prev.code.range.position_after.index == element.range.first_position.index
     )
 def applies(self, element: Element):
     return (not element.is_first()
             and is_token(element, Tokens.CONSTITUENT)
             and identifier_in(element.value, self.sym_vals) and
             is_not_none(element.prev, ".code.range.position_after.index",
                         element, ".range.first_position.index")
             and element.prev.code.range.position_after.index
             == element.range.first_position.index)
Example #12
def has_unique_block(opening_delimiter_element):
    """
    Returns ``True`` if the given ``BEGIN_MACRO`` token is of the form::

       BEGIN_MACRO BEGIN  END END_MACRO

    (so there is a single ``BEGIN`` / ``END`` pair between the given ``BEGIN_MACRO`` and ``END_MACRO`` pair)
    """
    closing_delimiter_element = opening_delimiter_element.end
    begin_element = opening_delimiter_element.next
    assert is_token(begin_element, Tokens.BEGIN)
    assert begin_element.end is not None
    end_element = begin_element.end
    assert end_element.next is closing_delimiter_element or is_token(
        end_element.next, Tokens.BEGIN)

    return end_element.next is closing_delimiter_element
Example #13
 def find_punctuation(self, value, before=None):
     if before is None: before = self.end
     punct = None
     elm = self.next
     while elm is not None and elm is not before:
         if is_token(elm, PUNCTUATION, value):  # look for the requested punctuation value
             punct = elm
             break
         elm = elm.next
     return punct
Example #14
    def apply(self, element) -> Element:
        if element.next is element.end:
            parent = element.parent
            parent.remove(element.end)
            element.code = Literal(
                "", self.string_literal_type,
                StreamRange(element.range.position_after,
                            element.range.position_after))
            return element.next

        assert is_token(element.next, Tokens.STRING)

        if element.next.next is element.end:
            parent = element.parent
            string_token = element.next
            parent.remove(element)
            parent.remove(element.end)
            string_token.code = Literal(string_token.value,
                                        self.string_literal_type,
                                        string_token.range)
            return string_token.next
        else:
            # FIXME: allow for interpolation
            new_form_element = element.parent.wrap(element, element.end, Form)
            new_form = new_form_element.code
            new_form.prepend(Identifier("str", element.range))
            # str BEGIN_MACRO('“') seq of STRING tokens, interspersed with Identifiers and BEGIN_MACRO / END_MACRO pairs END_MACRO

            new_form.remove(element)  # remove BEGIN_MACRO('“')
            new_form.remove(element.end)  # remove END_MACRO

            elm = new_form[1]  # first element
            while elm is not None:
                if is_token(elm, Tokens.STRING):
                    elm.code = Literal(elm.value, self.string_literal_type,
                                       elm.range)
                if is_token(elm, Tokens.BEGIN_MACRO):
                    elm = elm.end.next
                else:
                    elm = elm.next

            return new_form_element.next
 def apply(self, element):
     form = element.parent
     next = element.next
     if is_identifier(next) or is_literal(next):
         new_form_element = form.wrap(element, next, Form)
     elif is_token(next, Tokens.BEGIN_MACRO):
         new_form_element = form.wrap(element, next.end, Form)
     else:
         raise ArrangementError(next.range.first_position, "Expected identifier, literal or begin-macro-token after '%s' identifier in position %s." %(element.value, element.range.first_position.nameless_str))
     new_form = new_form_element.code
     new_form.remove(element)
     new_form.prepend(element)
     return new_form_element.next
Example #16
    def apply(self, element) -> Element:
        if element.next is element.end:
            parent = element.parent
            parent.remove(element.end)
            element.code = Literal("", self.string_literal_type, StreamRange(element.range.position_after, element.range.position_after))
            return element.next

        assert is_token(element.next, Tokens.STRING)

        if element.next.next is element.end:
            parent = element.parent
            string_token = element.next
            parent.remove(element)
            parent.remove(element.end)
            string_token.code = Literal(string_token.value, self.string_literal_type, string_token.range)
            return string_token.next
        else:
            # FIXME: allow for interpolation
            new_form_element = element.parent.wrap(element, element.end, Form)
            new_form = new_form_element.code
            new_form.prepend(Identifier("str", element.range))
            # str BEGIN_MACRO('“') seq of STRING tokens, interspersed with Identifiers and BEGIN_MACRO / END_MACRO pairs END_MACRO

            new_form.remove(element) # remove BEGIN_MACRO('“')
            new_form.remove(element.end)  # remove END_MACRO

            elm = new_form[1] # first element
            while elm is not None:
                if is_token(elm, Tokens.STRING):
                    elm.code = Literal(elm.value, self.string_literal_type, elm.range)
                if is_token(elm, Tokens.BEGIN_MACRO):
                    elm = elm.end.next
                else:
                    elm = elm.next

            return new_form_element.next
Example #17
def explode_list_of_args(first_begin_token):
    """
    Suppose we have a node with a sequence of segments, given by ``BEGIN``/``END`` token pairs::

         (BEGIN  END)* 

    and we are given the first ``BEGIN`` token.

    Then this function removes all ``BEGIN`` and ``END`` pairs that have neither a colon nor indentation, and returns a list
    concatenating every list of punctuation tokens in the removed ``BEGIN`` tokens.

    Furthermore, it adds commas in place of each removed ``END`` token that is not preceded by a comma.

    E.g. ``⟅BEGIN ab, c, END BEGIN d, e END BEGIN h : a1 a2 END⟆`` becomes ``ab, c, d, e BEGIN h : a1 a2 END⟆``.

    :param first_begin_token: The first ``BEGIN`` token in the sequence.
    """

    node = first_begin_token.parent
    begin_token = first_begin_token

    while is_token(begin_token, Tokens.BEGIN):
        assert begin_token.end is not None
        end_token = begin_token.end
        assert is_token(end_token, Tokens.END)
        after_end = end_token.next
        make_preform = len(
            begin_token.indents) > 0 or begin_token.find_punctuation(':')

        if not make_preform:  # if there is no INDENT token and no colons
            arg_break = Tokens.ARGBREAK()
            node.insert(end_token.prev, arg_break)
            node.remove(begin_token)
            node.remove(end_token)
        else:
            arg_break = Tokens.ARGBREAK()
            node.insert(end_token, arg_break)
        begin_token = after_end
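To make the docstring's before/after example concrete, here is a rough, self-contained approximation of the transformation (the explode helper and the ARGBREAK marker string are illustrative only and do not use the real anoky node API): segments without a colon or indentation are dissolved into the surrounding sequence, each followed by an argument break, while segments that do contain a colon or indentation are kept as blocks.

ARGBREAK = "<ARGBREAK>"

def explode(segments):
    """segments: list of (tokens, keep_block) pairs, where keep_block stands for
    'this BEGIN/END pair contains a colon or indentation'."""
    out = []
    for tokens, keep_block in segments:
        if keep_block:
            out.append(("BEGIN", tokens, "END"))  # the pair survives untouched
        else:
            out.extend(tokens)                    # BEGIN/END removed, contents spliced in
        out.append(ARGBREAK)                      # argument break after each segment
    return out

# ⟅BEGIN ab , c , END  BEGIN d , e END  BEGIN h : a1 a2 END⟆
before = [(["ab", ",", "c", ","], False),
          (["d", ",", "e"], False),
          (["h", ":", "a1", "a2"], True)]
after = explode(before)
# after == ['ab', ',', 'c', ',', '<ARGBREAK>', 'd', ',', 'e', '<ARGBREAK>',
#           ('BEGIN', ['h', ':', 'a1', 'a2'], 'END'), '<ARGBREAK>']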
Example #18
def explode_list_of_args(first_begin_token):
    """
    Suppose we have a node with a sequence of segments, given by ``BEGIN``/``END`` token pairs::

         (BEGIN  END)* 

    and we are given the first ``BEGIN`` token.

    Then this function removes all ``BEGIN`` and ``END`` pairs that have neither a colon nor indentation, and returns a list
    concatenating every list of punctuation tokens in the removed ``BEGIN`` tokens.

    Furthermore, it adds commas in place of each removed ``END`` token that is not preceded by a comma.

    E.g. ``⟅BEGIN ab, c, END BEGIN d, e END BEGIN h : a1 a2 END⟆`` becomes ``ab, c, d, e BEGIN h : a1 a2 END⟆``.

    :param first_begin_token: The first ``BEGIN`` token in the sequence.
    """

    node = first_begin_token.parent
    begin_token = first_begin_token

    while is_token(begin_token, Tokens.BEGIN):
        assert begin_token.end is not None
        end_token = begin_token.end
        assert is_token(end_token, Tokens.END)
        after_end = end_token.next
        make_preform = len(begin_token.indents) > 0 or begin_token.find_punctuation(':')

        if not make_preform: # if there is no INDENT token and no colons
            arg_break = Tokens.ARGBREAK()
            node.insert(end_token.prev, arg_break)
            node.remove(begin_token)
            node.remove(end_token)
        else:
            arg_break = Tokens.ARGBREAK()
            node.insert(end_token, arg_break)
        begin_token = after_end
 def apply(self, element):
     form = element.parent
     next = element.next
     if is_identifier(next) or is_literal(next):
         new_form_element = form.wrap(element, next, Form)
     elif is_token(next, Tokens.BEGIN_MACRO):
         new_form_element = form.wrap(element, next.end, Form)
     else:
         raise ArrangementError(
             next.range.first_position,
             "Expected identifier, literal or begin-macro-token after '%s' identifier in position %s."
             % (element.value, element.range.first_position.nameless_str))
     new_form = new_form_element.code
     new_form.remove(element)
     new_form.prepend(element)
     return new_form_element.next
Example #20
def anoky_tokenize(stream, options):
    tokenized_node = __parser__.tokenize_into_node(stream, emmit_restart_tokens=False)
    if options.print_tokens:
        print('\n——›–  Tokenized source  –‹——')
        for token in tokenized_node:
            print(str(token))
    errors = []
    for token in tokenized_node:
        if is_token(token, Tokens.ERROR):
            errors.append(token)
    if len(errors) > 0:
        message = ''
        for token in errors:
            if token.message is not None and token.message != '':
                message += '%s: %s\n' % (token.range, token.message)
        raise TokenizingError(None, message)
    return tokenized_node
Example #21
    def apply(self, element) -> Element:
        first_begin = element.next
        assert is_token(first_begin, Tokens.BEGIN)

        new_seq_element = element.parent.wrap(element, element.end, PreSeq)
        new_seq = new_seq_element.code

        begin_element = element.next
        last_element = new_seq.last
        new_seq.remove(element) # remove BEGIN_MACRO('(')
        new_seq.remove(last_element)  # remove END_MACRO(')')
        #if begin_element is last_element: # if we have something like a[] or a{} or so...
        #    return new_tuple_element.next
        #else:
        #    return \
        Util.explode_list_of_args(begin_element)
        return new_seq_element.next
Example #22
    def apply(self, element) -> Element:
        first_begin = element.next
        assert is_token(first_begin, Tokens.BEGIN)

        new_seq_element = element.parent.wrap(element, element.end, PreSeq)
        new_seq = new_seq_element.code

        begin_element = element.next
        last_element = new_seq.last
        new_seq.remove(element)  # remove BEGIN_MACRO('(')
        new_seq.remove(last_element)  # remove END_MACRO(')')
        #if begin_element is last_element: # if we have something like a[] or a{} or so...
        #    return new_tuple_element.next
        #else:
        #    return \
        Util.explode_list_of_args(begin_element)
        return new_seq_element.next
Example #23
def anoky_tokenize(stream, options):
    tokenized_node = __parser__.tokenize_into_node(stream,
                                                   emmit_restart_tokens=False)
    if options.print_tokens:
        print('\n——›–  Tokenized source  –‹——')
        for token in tokenized_node:
            print(str(token))
    errors = []
    for token in tokenized_node:
        if is_token(token, Tokens.ERROR):
            errors.append(token)
    if len(errors) > 0:
        message = ''
        for token in errors:
            if token.message is not None and token.message != '':
                message += '%s: %s\n' % (token.range, token.message)
        raise TokenizingError(None, message)
    return tokenized_node
Example #24
 def apply(self, element):
     form = element.parent
     form.wrap(form.first, element.prev, PreSeq)
     if element.next is not None:
         first_indent = element.next
         while first_indent is not None and not is_token(first_indent, Tokens.INDENT):
             first_indent = first_indent.next
         if first_indent is not None:
             new_form = form.wrap(element.next, first_indent, PreSeq).code
             new_form.remove(first_indent)
         else:
             form.wrap(element.next, form.last, PreSeq)
     form.remove(element)
     if isinstance(form, PreForm):
         form.prepend(element)
     else:
         new_form = form.wrap(form.first, form.last, Form).code
         new_form.replace(new_form.first, element)
     return None
Example #25
 def apply(self, element):
     form = element.parent
     form.wrap(form.first, element.prev, PreSeq)
     if element.next is not None:
         first_indent = element.next
         while first_indent is not None and not is_token(
                 first_indent, Tokens.INDENT):
             first_indent = first_indent.next
         if first_indent is not None:
             new_form = form.wrap(element.next, first_indent, PreSeq).code
             new_form.remove(first_indent)
         else:
             form.wrap(element.next, form.last, PreSeq)
     form.remove(element)
     if isinstance(form, PreForm):
         form.prepend(element)
     else:
         new_form = form.wrap(form.first, form.last, Form).code
         new_form.replace(new_form.first, element)
     return None
Example #26
 def apply(self, element):
     form = element.parent
     next = element.next  # this is 'b'
     prev = element.prev  # this is 'a'
     if is_identifier(next) or is_literal(next) or is_form(next):
         new_form_element = form.wrap(prev, next, Form)
     elif is_token(next, Tokens.BEGIN_MACRO):
         # a . BEGIN_MACRO something END_MACRO dont want => (. a BEGIN_MACRO) something END_MACRO
         # actually want
         # a . BEGIN_MACRO something END_MACRO => (. a BEGIN_MACRO something END_MACRO)
         new_form_element = form.wrap(prev, next.end, Form)
     else:
         raise ArrangementError(
             element.range.position_after,
             "Expected identifier, literal, form or begin-macro-token after '%s' in position %s."
             %
             (element.code.name, element.range.first_position.nameless_str))
     new_form = new_form_element.code
     # at this point new_form = ⦅a X b⦆
     new_form.remove(element)
     # at this point new_form = ⦅a b⦆
     new_form.prepend(element)
     # at this point new_form = ⦅X a b⦆
     return new_form_element.next  # return the next position to be read
Example #27
 def applies(self, element):
     #return is_token(element, Tokens.BEGIN_MACRO, token_text="“")
     return any([is_token(element, Tokens.BEGIN_MACRO, token_text=c) for c in self.str_delims])
Example #28
 def applies(self, element):
     return is_token(element, Tokens.BEGIN)
Example #29
 def applies(self, element):
     return is_token(element, Tokens.CONSTITUENT) and element.code is None
 def _is_begin_macro_token_immediately_after(next, element):
     return (is_token(next, Tokens.BEGIN_MACRO)
             and is_not_none(next, ".range.first_position.index")
             and element.code.range.position_after.index
             == next.range.first_position.index)
Example #31
 def applies(self, element):
     return is_token(element, Tokens.BEGIN_MACRO, token_text="##")
Example #32
    def apply(self, element: Element):

        node = element.code
        assert isinstance(node, Node)

        if len(node) < 2:
            return element.next

        first_small_group = None
        seen_colon = False
        seen_punctuation = False

        # 1. Check to make sure there are no punctuation tokens before the skip_count
        start_of_group = node[0]
        for i in range(self.skip_count):
            if is_token(start_of_group, Tokens.PUNCTUATION):
                seen_punctuation = True
                raise ArrangementError(start_of_group.range,
                                       "Unexpected punctuation '%s' before start of argument sequence." % start_of_group.value)
            start_of_group = start_of_group.next

        # 2. Remove leading break, if any
        while is_token(start_of_group, Tokens.ARGBREAK):
            seen_punctuation = True
            nxt = start_of_group.next
            node.remove(start_of_group)
            start_of_group = nxt

        # 3. The first token after skip_count cannot be punctuation, unless it's a colon ':'
        if is_token(start_of_group, Tokens.PUNCTUATION):
            seen_punctuation = True
            if start_of_group.value != ':':
                raise ArrangementError(start_of_group.range.first_position,
                                       "Unexpected punctuation '%s' at start of argument sequence." % start_of_group.value)
            else:
                seen_colon = True
                colon = start_of_group
                start_of_group = colon.next
                node.remove(colon)
                # and after this colon there should not be any punctuation
                if is_token(start_of_group, Tokens.PUNCTUATION) and not is_token(start_of_group, Tokens.ARGBREAK):
                    raise ArrangementError(start_of_group.range.first_position,
                                           "Unexpected punctuation '%s' after ':'." % start_of_group.value)
                # but there should be *something*
                elif start_of_group is None:
                    raise ArrangementError(colon.range, "Nothing after `:`!")

        # This function will wrap the last group of tokens in a PreSeq
        def finish_groups(last_element_in_group):
            nonlocal first_small_group
            if seen_punctuation and start_of_group not in [None, node[0], last_element_in_group] and last_element_in_group is not None:
                node.wrap(start_of_group, last_element_in_group, PreSeq)

        if start_of_group is None:
            return element.next

        # Iterate through all elements of the node, in search of ',' or ':' punctuation tokens, or ARGBREAK tokens
        # If there are two or more elements between such tokens, wrap them in a PreSeq
        # Stop if an INDENT token is found
        for punctuation_token in node.iterate_from(start_of_group):

            # an ARGBREAK token will wrap the previous tokens (from start_of_group to this point)
            if is_token(punctuation_token, Tokens.ARGBREAK):
                seen_punctuation = True
                if start_of_group is punctuation_token:
                    # ARGBREAKS after punctuation tokens are ignored
                    start_of_group = punctuation_token.next
                    node.remove(punctuation_token)
                else:
                    # wrap-previous-tokens(start_of_group, punctuation_token, PreSeq)
                    new_group = node.wrap(start_of_group, punctuation_token.prev, PreSeq)
                    if first_small_group is None:
                        first_small_group = new_group
                    start_of_group = punctuation_token.next
                    node.remove(punctuation_token)
            elif is_token(punctuation_token, Tokens.PUNCTUATION, ','):
                seen_punctuation = True
                if start_of_group is punctuation_token:
                    raise ArrangementError(punctuation_token.range.first_position,
                                           "Unexpected punctuation '%s'." % punctuation_token.value)
                # wrap-previous-tokens(start_of_group, punctuation_token, PreSeq)
                new_group = node.wrap(start_of_group, punctuation_token.prev, PreSeq)
                if first_small_group is None:
                    first_small_group = new_group
                start_of_group = punctuation_token.next
                node.remove(punctuation_token)

            elif is_token(punctuation_token, Tokens.PUNCTUATION, ':'):
                seen_punctuation = True
                if seen_colon:
                    raise ArrangementError(punctuation_token.range.first_position,
                                           "Argument sequence should have a single colon ':'.")
                if start_of_group is punctuation_token:
                    raise ArrangementError(punctuation_token.range.first_position,
                                           "Unexpected punctuation '%s'." % punctuation_token.value)
                seen_colon = True
                finish_groups(punctuation_token.prev)
                if first_small_group is not None:
                    node.wrap(first_small_group, punctuation_token.prev, PreSeq)
                start_of_group = punctuation_token.next
                node.remove(punctuation_token)
            elif is_token(punctuation_token, Tokens.PUNCTUATION):
                raise ArrangementError(punctuation_token.range.first_position,
                                       "Default punctuator cannot parse unknown punctuation token '%s'." % punctuation_token.value)
            elif is_token(punctuation_token, Tokens.INDENT):
                if start_of_group is not punctuation_token:
                    finish_groups(punctuation_token.prev)
                node.remove(punctuation_token)
                return element.next

        finish_groups(node.last)

        return element.next
Example #33
def is_opening_delimiter_among(element: Element, opening_delimiters: set):
    """
    Returns ``True`` if the given element is a ``BEGIN_MACRO`` token with ``text in opening_delimiters``.
    """
    return is_token(element,
                    Tokens.BEGIN_MACRO) and element.text in opening_delimiters
Example #34
def is_opening_delimiter_among(element:Element, opening_delimiters:set):
    """
    Returns ``True`` if the given element is a ``BEGIN_MACRO`` token with ``text in opening_delimiters``.
    """
    return is_token(element, Tokens.BEGIN_MACRO) and element.text in opening_delimiters
Example #35
 def applies(self, element):
     return is_token(element, Tokens.BEGIN)
 def _is_begin_macro_token_immediately_after(next, element):
     return (is_token(next, Tokens.BEGIN_MACRO) and
         is_not_none(next, ".range.first_position.index") and
         element.code.range.position_after.index == next.range.first_position.index)
Example #37
 def applies(self, element):
     #return is_token(element, Tokens.BEGIN_MACRO, token_text="“")
     return any([
         is_token(element, Tokens.BEGIN_MACRO, token_text=c)
         for c in self.str_delims
     ])
Example #38
 def applies(self, element):
     return is_token(element, Tokens.CONSTITUENT) and element.code is None
Example #39
def is_opening_delimiter(element:Element, opening_delimiter_str:str):
    """
    Returns ``True`` if the given element is a ``BEGIN_MACRO`` token with ``text == opening_delimiter_str``.
    """
    return is_token(element, Tokens.BEGIN_MACRO, token_text=opening_delimiter_str)
Example #40
 def applies(self, element):
     return is_token(element, Tokens.BEGIN) and \
            len(element.indents) > 0 and \
            (is_identifier(element.indents[0].prev, "=") or
             is_identifier(element.indents[0].prev, ":=")) and \
             element.next is not element.indents[0].prev
Example #41
def element_after(element) -> Element:
    if is_token(element, Tokens.BEGIN_MACRO) or is_token(
            element, Tokens.BEGIN):
        return element.end.next
    else:
        return element.next
Example #42
 def applies(self, element):
     return is_token(element, Tokens.BEGIN_MACRO, token_text="##")
Example #43
def element_after(element) -> Element:
    if is_token(element, Tokens.BEGIN_MACRO) or is_token(element, Tokens.BEGIN):
        return element.end.next
    else:
        return element.next