Example #1
from tokenizer import (Tokenizer, word_consumer, wordlist_consumer,
                       regex_consumer, TokenStream, TokenParser)
from ast import parser  # the project's own ast module, not the standard library ast
from scope import Scope
from lisplib import NATIVES
from argproc import ArgProc
from translate import translators
import sys

# Register token consumers; each one recognizes a single token type at the
# start of the remaining input (all regexes below are anchored with "^").
tokenizer = Tokenizer()
tokenizer.add_consumer("escaped_char", regex_consumer(r"^\\."))
tokenizer.add_consumer("open", word_consumer("("))
tokenizer.add_consumer("close", word_consumer(")"))
tokenizer.add_consumer("true", word_consumer("true"))
tokenizer.add_consumer("false", word_consumer("false"))
tokenizer.add_consumer("null", word_consumer("null"))
# Identifiers and single-character operator symbols; "-" is escaped inside the
# character class so it matches literally instead of forming a "*-+" range.
tokenizer.add_consumer(
    "word",
    regex_consumer(
        r"^(?:[a-zA-Z$_][a-zA-Z$_0-9]*|[!£%^&*\-+=\[\]\{\}@'#~,<.>/?\\])"))
# "float" is registered before "int" so that "3.14" is consumed as one float
# token rather than split, in case consumers are tried in registration order.
tokenizer.add_consumer("float", regex_consumer(r"^[0-9]+\.[0-9]+"))
tokenizer.add_consumer("int", regex_consumer(r"^[0-9]+"))
tokenizer.add_consumer("string", regex_consumer(r"^\"(?:[^\"\\]|\\.)*\""))
tokenizer.add_consumer("whitespace", regex_consumer(r"^\s+"))

# Global scope, pre-populated with the built-in (native) bindings.
global_scope = Scope()
global_scope.add_natives(NATIVES)


def filter_tokens(tokens):
    new_tokens = []