def _process(L):
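        """Shunting-yard pass.

        Converts the infix token list L into postfix order: literals and
        terms go straight to the output, operators and functions are held
        on a stack and emitted according to precedence and associativity,
        and nested blocks are converted recursively.
        """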
        stack = []
        output = []

        i = 0
        while i < len(L):
            token = L[i]
            t = token.type
            v = token.value
            if t in LITERAL_TYPES or t == TYPE_TERM:
                output.append(token)
            elif t == TYPE_FUNCTION:
                stack.append(token)
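            # A separator flushes the stack into the output, stopping at any
            # earlier separator.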
            elif t == TYPE_SEPARATOR:
                while len(stack) > 0 and stack[-1].type != TYPE_SEPARATOR:
                    output.append(stack.pop())
            elif t == TYPE_OPERATOR:
                # Move operators from the stack to the output until the
                # precedence/associativity rule allows the new operator to be
                # pushed on top.
                while len(stack) > 0 and stack[-1].type == TYPE_OPERATOR:
                    pres = stack[-1].value.precedence - v.precedence
                    if ((v.associativity == LEFT_TO_RIGHT and pres <= 0)
                            or (v.associativity == RIGHT_TO_LEFT and pres < 0)):
                        output.append(stack.pop())
                    else:
                        break

                stack.append(token)
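            # A block is converted recursively; its start and end tokens are
            # copied through unchanged around the converted contents.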
            elif t == TYPE_BLOCK_START:
                # We need the index of the token that closes this block.
                end_index = find_endblock_token_index(L, i + 1)
                result = Shunter._process(L[i + 1:end_index])
                output.append(token)
                output += result
                output.append(L[end_index])
                i = end_index
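                # If this block was a tuple (an argument list) and a function
                # is waiting on the stack, emit that function now so it
                # follows its arguments in the postfix output.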
                if (token.value == TUPLE_START_CHAR and len(stack) > 0
                        and stack[-1].type == TYPE_FUNCTION):
                    output.append(stack.pop())
            else:
                print('uh oh... unexpected token:', token)
            i += 1

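        # Drain whatever is left on the stack into the output.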
        while len(stack) > 0:
            output.append(stack.pop())

        return output
	def process(tokenlist):
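		"""Rewrite "sub"/"call" terms into subroutines and gosub jumps.

		Illustrative sketch (exact spellings depend on the tokenizer and
		BLOCK_START_CHAR): a fragment like `sub greet { ... }` becomes a
		TYPE_SUBROUTINE token named "greet" followed by its block (with a
		trailing "return" ensured), and `call greet` becomes a TYPE_GOSUB
		token whose value is later resolved to the subroutine's location.
		"""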
		# First we find every "sub" term and replace it, together with the name
		# that follows it, with a single TYPE_SUBROUTINE token.
		# Later in this pass we do the same kind of rewrite for the "call" term.
		i = 0
		while i < len(tokenlist.tokens):
			token = tokenlist.tokens[i]
			t = token.type
			v = token.value

			if t == TYPE_TERM:
				if v == "sub":
					# we look for a term following this.
					subname_token = None
					if i+1 < len(tokenlist.tokens):
						subname_token = tokenlist.tokens[i+1]
					if subname_token is None or subname_token.type != TYPE_TERM:
						error_format(token, "Sub is expected to be followed by the name of the subroutine.")

					# We remove the sub token.
					tokenlist.tokens.pop(i)
					decrement_gotos_pointing_after_here(tokenlist, i)

					# We then change the type of the second token to TYPE_SUBROUTINE.
					subname_token.type = TYPE_SUBROUTINE

					# We expect to find a block following this second token.
					blockstart_token = None
					if i + 1 < len(tokenlist.tokens):
						blockstart_token = tokenlist.tokens[i + 1]
					if blockstart_token is None or blockstart_token.type != TYPE_BLOCK_START or blockstart_token.value != BLOCK_START_CHAR:
						error_format(token, "Sub is expected to be followed by the name of the subroutine and then the block to run.")

					# We hop to the end of the block and check if there is a return.
					# If there isn't, then we add one in.
					index = find_endblock_token_index(tokenlist.tokens, i + 1)

					last_token = tokenlist.tokens[ index - 1 ]
					if last_token.type != TYPE_TERM or last_token.value != "return":
						tokenlist.tokens.insert( index - 1, Token(TYPE_TERM, "return", None, None, 0))
						increment_gotos_pointing_after_here(tokenlist, index)
			i += 1

		# We also do something similar to the "call" term.
		i = 0
		while i < len(tokenlist.tokens):
			token = tokenlist.tokens[i]
			t = token.type
			v = token.value

			if t == TYPE_TERM:
				if v == "call":
					# we look for a term following this.
					subname_token = None
					if i + 1 < len(tokenlist.tokens):
						subname_token = tokenlist.tokens[i + 1]
					if subname_token is None or subname_token.type != TYPE_TERM:
						error_format(token, "Call is expected to be followed by the name of the subroutine.")

					# We remove the call token.
					tokenlist.tokens.pop(i)
					decrement_gotos_pointing_after_here(tokenlist, i)
					i -= 1

					# We then change the type of the second token to TYPE_GOSUB.
					subname_token.type = TYPE_GOSUB
			i += 1

		# Next we generate a dictionary of locations for the subroutines.
		subroutine_locations = {}
		i = 0
		while i < len(tokenlist.tokens):
			token = tokenlist.tokens[i]
			t = token.type
			v = token.value

			if t == TYPE_SUBROUTINE:
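				# The stored location skips the name and block-start tokens, so
				# it points at the first token of the subroutine body.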
				subroutine_locations[v] = i+2
			i += 1

		# Finally we go through and change all of the TYPE_GOSUB values to their appropriate locations.
		i = 0
		while i < len(tokenlist.tokens):
			token = tokenlist.tokens[i]

			if token.type == TYPE_GOSUB:
				if token.value in subroutine_locations:
					token.value = subroutine_locations[token.value]
				else:
					error_format(token, "Call points to a nonexistant subroutine.")
			i += 1
Example #3
    def process(tokenlist):
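        """Lower "if"/"while" pseudo-functions into goto-based control flow.

        The rewrite: the argument-count literal in front of each
        pseudo-function is dropped; an "if" whose block is followed by an
        "else" gains a goto at the end of its block that jumps past the rest
        of the chain; a "while" body gains a goto back to the start of its
        condition block; "else" tokens (and the start/end tokens of their
        blocks) are removed; finally the pseudo-function tokens themselves
        are removed.
        """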
        # First we go through and remove every pseudo-function's argument-count literal.
        pseudo_functions = ("while", "if")

        i = 0
        while i < len(tokenlist.tokens):
            token = tokenlist.tokens[i]
            t = token.type
            if t == TYPE_FUNCTION:
                v = token.value
                if v in pseudo_functions:
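                    # The argument-count literal sits directly before the
                    # pseudo-function token, so drop it.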
                    tokenlist.tokens.pop(i - 1)
                    i -= 1
            i += 1

        # Then we go through and add all of the needed gotos.
        i = 0
        while i < len(tokenlist.tokens):
            token = tokenlist.tokens[i]
            t = token.type

            if t == TYPE_FUNCTION:
                v = token.value
                if v == "if":
                    # first we must determine that there is in fact a body following this function.
                    if not (i + 1 < len(tokenlist.tokens)
                            and tokenlist.tokens[i + 1].type == TYPE_BLOCK_START
                            and tokenlist.tokens[i + 1].value == BLOCK_START_CHAR):
                        error_format(token, "\"if\" should be followed by a block.")

                    # The goto is only added when the block following this if-function is itself followed by an "else".
                    index = find_endblock_token_index(tokenlist.tokens, i + 1)
                    if index + 1 < len(tokenlist.tokens):
                        token2 = tokenlist.tokens[index + 1]
                        t2 = token2.type
                        v2 = token2.value
                        if t2 == TYPE_TERM and v2 == "else":
                            # we're looking for where this "if" layer ends, so we aim for just below it.
                            end_of_chain = find_endblock_token_index(
                                tokenlist.tokens, i, 1)
                            if end_of_chain is None:
                                end_of_chain = len(tokenlist.tokens)
                            tokenlist.tokens.insert(
                                index,
                                Token(TYPE_GOTO, end_of_chain, None, None, 0))
                            increment_gotos_pointing_after_here(
                                tokenlist, index)

                elif v == "while":
                    # first we must determine that there is in fact a body following this function.
                    if not (i + 1 < len(tokenlist.tokens)
                            and tokenlist.tokens[i + 1].type == TYPE_BLOCK_START
                            and tokenlist.tokens[i + 1].value == BLOCK_START_CHAR):
                        error_format(token, "\"while\" should be followed by a body.")

                    # Next we place a goto at the end of that body to point back at this while-function's args.
                    index = find_endblock_token_index(tokenlist.tokens, i + 1)
                    goto = find_startblock_token_index(tokenlist.tokens, i - 1)
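                    # The goto target is the start of the while's condition
                    # block, so execution loops back to re-evaluate the
                    # condition after each pass through the body.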
                    tokenlist.tokens.insert(
                        index, Token(TYPE_GOTO, goto, None, None, 0))
                    increment_gotos_pointing_after_here(tokenlist, index)
            i += 1

        # Next we remove every "else" token.  If the else is followed by a block,
        # that block's start and end tokens are removed as well (its contents stay).
        i = 0
        while i < len(tokenlist.tokens):
            token = tokenlist.tokens[i]
            t = token.type
            if t == TYPE_TERM:
                v = token.value
                if v == "else":
                    tokenlist.tokens.pop(i)
                    decrement_gotos_pointing_after_here(tokenlist, i)
                    i -= 1
                    if (i + 1 < len(tokenlist.tokens)
                            and tokenlist.tokens[i + 1].type == TYPE_BLOCK_START
                            and tokenlist.tokens[i + 1].value == BLOCK_START_CHAR):
                        end_index = find_endblock_token_index(tokenlist.tokens, i + 1)
                        tokenlist.tokens.pop(i + 1)
                        decrement_gotos_pointing_after_here(tokenlist, i + 1)
                        i -= 1
                        end_index -= 1
                        tokenlist.tokens.pop(end_index)
                        i -= 1
                        end_index -= 1
                        decrement_gotos_pointing_after_here(tokenlist, end_index)
            i += 1

        # Lastly we go through and remove all of the pseudo-function tokens themselves.
        i = 0
        while i < len(tokenlist.tokens):
            token = tokenlist.tokens[i]
            t = token.type
            if t == TYPE_FUNCTION:
                v = token.value
                if v in pseudo_functions:
                    tokenlist.tokens.pop(i)
                    decrement_gotos_pointing_after_here(tokenlist, i)
                    i -= 1
            i += 1