Example #1
    def test_reordered_simple_addition_repr(self):
        n0 = Node(Token(token_type=TokenType.identifier, value='print'))
        n1 = Node(Token(token_type=TokenType.operator, value=':'))
        n2 = Node(Token(token_type=TokenType.number, value='4'))
        n3 = Node(Token(token_type=TokenType.operator, value='+'))
        n4 = Node(Token(token_type=TokenType.number, value='1'))

        ast = AbstractSyntaxTree(n0)
        n0.add_child(n1)
        n1.add_child(n2)

        expect = dedent('''\
        Token(token_type=identifier, value='print')
         │
        Token(token_type=operator, value=':')
         │
        Token(token_type=number, value='4')
        ''')
        self.assertEqual(repr(ast), expect)

        ast.insert_parent_into_hierarchy(n3, n1)
        n3.add_child(n4)

        expect = dedent('''\
        Token(token_type=identifier, value='print')
         │
        Token(token_type=operator, value=':')
         │
        Token(token_type=operator, value='+')
         ├── Token(token_type=number, value='4')
         └── Token(token_type=number, value='1')
        ''')
        self.assertEqual(repr(ast), expect)
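
Here insert_parent_into_hierarchy(n3, n1) splices the '+' node between ':' and its existing child, which is why '4' ends up alongside '1' under '+' in the second expected tree. A minimal sketch of such a splice on a plain children-list node (a hypothetical implementation, not necessarily the project's):

def insert_parent_into_hierarchy(new_parent, parent):
    # Hypothetical splice: new_parent adopts parent's current children,
    # then becomes parent's only child.
    new_parent.children = parent.children
    parent.children = [new_parent]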
Example #2
    def get_all_tokens(self):
        """Find all tokens in the provided code and store them in the token list."""
        word = ""
        begin_string = False
        i = 0

        while i < len(self.code):
            char = self.code[i]
            # Skip whitespace, but only outside of string constants
            if char in (' ', '\t', '\n') and not begin_string:
                i += 1
                word = ""
                continue

            word += char
            # Look ahead one character; fall back to a space at the end of the
            # input so the boundary checks below still fire for the last token.
            next_char = self.code[i + 1] if i + 1 < len(self.code) else ' '
            if word in KEYWORDS and next_char in SYMBOLS + SKIPABLE:
                self.tokens.append(Token("keyword", word))
                word = ""
            elif char == '"' or begin_string:  # inside (or entering) a string constant
                if char == '"':
                    begin_string = not begin_string
                if not begin_string:
                    # Store the string without its surrounding quotes
                    self.tokens.append(Token("stringConstant", word[1:-1]))
                    word = ""
            elif word in SYMBOLS:
                self.tokens.append(Token("symbol", word))
                word = ""
            elif next_char in SKIPABLE + SYMBOLS:
                if word.isdigit():
                    self.tokens.append(Token("integerConstant", word))
                else:
                    self.tokens.append(Token("identifier", word))
                word = ""
            i += 1
Example #3
 def _set_tokens_minter(self, minter, fa2, governance, nfts):
     token = Token(self.client)
     calls = [
         token.set_minter_call(fa2, minter),
         token.set_minter_call(governance, minter)
     ]
     calls.extend(
         [token.set_minter_call(v, minter) for v in nfts.values()])
     return calls
Example #4
 def multi_digit_number(self):
     """
     Construct multiple-digit numbers
     """
     result = ""
     while self.current_char is not None and (self.current_char.isdigit()
                                              or self.current_char == "."):
         result += self.current_char
         self.next()
     if "." in result:
         return Token(REAL_CONST, float(result))
     else:
         return Token(INTEGER_CONST, int(result))
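
The digit/dot scan above decides between a real and an integer constant only after the whole lexeme has been read. A standalone sketch of the same idea, using plain (type, value) tuples in place of the snippet's Token class:

REAL_CONST, INTEGER_CONST = "REAL_CONST", "INTEGER_CONST"

def scan_number(text, pos=0):
    # Consume digits and dots, then classify by the presence of a dot.
    result = ""
    while pos < len(text) and (text[pos].isdigit() or text[pos] == "."):
        result += text[pos]
        pos += 1
    if "." in result:
        return (REAL_CONST, float(result)), pos
    return (INTEGER_CONST, int(result)), pos

# scan_number("3.14+2") -> (('REAL_CONST', 3.14), 4)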
Example #5
    def _action(self, robot_id, ui_id, robot_entity, next_mode):
        if next_mode == const.MODE_STANDBY:
            nws = robot_entity['navigating_waypoints']['value']

            if isinstance(nws, dict) and nws and 'action' in nws \
                    and 'func' in nws['action'] and nws['action']['func'] \
                    and 'token' in nws['action'] and nws['action']['token'] \
                    and isinstance(nws['action']['token'], str) \
                    and 'waiting_route' in nws['action']:
                func = nws['action']['func']
                token = Token.get(nws['action']['token'])
                waiting_route = nws['action']['waiting_route']
                if func == 'lock':
                    has_lock = token.get_lock(robot_id)
                    if has_lock:
                        self.move_next(robot_id, check=False)
                        self._send_token_info(ui_id, token, TokenMode.LOCK)
                    else:
                        if waiting_route:
                            self._take_refuge(robot_id, waiting_route)
                        self._send_token_info(ui_id, token, TokenMode.SUSPEND)
                elif func == 'release':
                    new_owner_id = token.release_lock(robot_id)
                    self.move_next(robot_id, check=False)
                    self._send_token_info(ui_id, token, TokenMode.RELEASE)
                    if new_owner_id:
                        self.move_next(new_owner_id, check=False)
                        self._send_token_info(const.ID_TABLE[new_owner_id],
                                              token, TokenMode.RESUME)
                        self._send_token_info(const.ID_TABLE[new_owner_id],
                                              token, TokenMode.LOCK)
Example #6
 def match_token(self, source, startPos):
     self.last_value = None
     token_match = self.re_token.match(source, startPos)
     if token_match is not None and token_match.group() != '':
         return Token(self.token_type, token_match.group(0), startPos)
     return None
Example #7
 def __init__(self,
              shell="http://localhost:8732",
              key="edsk3QoqBuvdamxouPhin7swCvkQNgq4jP5KZPbwWNnwdZpSpJiEbq"):
     client: PyTezosClient = pytezos.using(key=key, shell=shell)
     self.minter = Minter(client)
     self.token = Token(client)
     self.quorum = Quorum(client)
     self.deploy = Deploy(client)
     self.governance = Governance(client)
Example #8
 def id_keywords(self):
     """
     Handle ids and reserved keywords
     """
     result = ""
     while self.current_char is not None and self.current_char.isalnum():
         result += self.current_char
         self.next()
     token = RESERVED_KEYWORDS.get(result, Token(ID, result))
     return token
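
RESERVED_KEYWORDS.get(result, Token(ID, result)) is the usual dict-with-default idiom: reserved words map to prebuilt keyword tokens, and anything else falls through to a fresh identifier token. A tiny standalone sketch with hypothetical names:

RESERVED = {"BEGIN": ("BEGIN", "BEGIN"), "END": ("END", "END")}

def classify(word):
    # Words that are not reserved become identifier tokens.
    return RESERVED.get(word, ("ID", word))

# classify("BEGIN") -> ('BEGIN', 'BEGIN'); classify("x") -> ('ID', 'x')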
Example #9
    def nyan_filter(self, status):
        token = Token()
        reader = Reader()
        api = token.get_key(reader.json_dir())

        print(status.text)
        text = status.text

        # for nyan in nyan_list:
        for nyan in open('./dictionary.txt', 'r'):
            nyan = nyan.replace('\n', '')
            print(nyan)
            if nyan in text:
                print("OUT!! Delete Tweet!! Nyan Nyan Filter Start Up!!")
                for tweet in tweepy.Cursor(api.user_timeline).items():
                    api.destroy_status(tweet.id)
                    break  # delete only the most recent tweet
                api.update_status("にゃんにゃんフィルター発動!!\n" + datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
            else:
                print("No problem!!")
Example #10
    def test_simple_addition_to_hash_dict(self):
        n0 = Node(Token(token_type=TokenType.identifier, value='print'))
        n1 = Node(Token(token_type=TokenType.operator, value=':'))
        n2 = Node(Token(token_type=TokenType.number, value='4'))
        n3 = Node(Token(token_type=TokenType.operator, value='+'))
        n4 = Node(Token(token_type=TokenType.number, value='1'))

        ast = AbstractSyntaxTree(n0)
        n0.add_child(n1)
        n1.add_child(n3)
        n3.add_children([n2, n4])

        nodes = OrderedDict()
        nodes[n0] = [n1]
        nodes[n1] = [n3]
        nodes[n3] = [n2, n4]
        nodes[n2] = []
        nodes[n4] = []
        expect = get_hash_dict(nodes)
        self.assertEqual(ast.to_hash_dict(), expect)
Example #11
def pruneToken(nlptok, mainIndex):
    # Build a simplified Token from a spaCy-style token, collapsing any
    # internal whitespace in its text and leaving the remaining fields empty.
    return Token(
        0,
        "".join(nlptok.string.split()),
        nlptok.lemma_,
        nlptok.pos_,
        "empty",
        "empty",
        [],
        mainIndex,
    )
Example #12
    def get_next_token(self):
        """
        Lexical analyzer (also known as scanner or tokenizer)
        This method is responsible for breaking a sentence
        apart into tokens.
        """
        while self.current_char is not None:

            if self.current_char.isspace():
                self.skip_whitespace()
                continue

            if self.current_char.isdigit():
                return Token(Token.INTEGER, self.integer())

            if self.current_char == '+':
                self.advance()
                return Token(Token.PLUS, '+')

            if self.current_char == '-':
                self.advance()
                return Token(Token.MINUS, '-')

            if self.current_char == '*':
                self.advance()
                return Token(Token.MULTIPLY, '*')

            if self.current_char == '/':
                self.advance()
                return Token(Token.DIVIDE, '/')

            self.error('get_next_token - unknown char')

        return Token(Token.EOF, None)
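
A typical driver for a lexer like this one pulls tokens until EOF. A hedged usage sketch, assuming a Lexer class wrapping the method above and tokens that expose a type attribute (neither is shown in the snippet):

lexer = Lexer("3 + 4 * 2")      # hypothetical constructor
token = lexer.get_next_token()
while token.type != Token.EOF:  # assumes tokens carry a .type field
    print(token)
    token = lexer.get_next_token()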
Example #13
 def test_oneline_computation(self):
     text = "print: 4 * var + 1"
     lexer = Lexer()
     tokens = lexer.get_tokens(text)
     expect = [
         Token(token_type=TokenType.identifier, value='print'),
         Token(token_type=TokenType.operator, value=':'),
         Token(token_type=TokenType.number, value='4'),
         Token(token_type=TokenType.operator, value='*'),
         Token(token_type=TokenType.identifier, value='var'),
         Token(token_type=TokenType.operator, value='+'),
         Token(token_type=TokenType.number, value='1')]
     self.assertEqual(tokens, expect)
Example #14
 def test_print_computation(self):
     parser = Parser()
     tokens = [
         Token(token_type=TokenType.identifier, value='print'),
         Token(token_type=TokenType.operator, value=':'),
         Token(token_type=TokenType.number, value='4'),
         Token(token_type=TokenType.operator, value='*'),
         Token(token_type=TokenType.identifier, value='var'),
         Token(token_type=TokenType.operator, value='+'),
         Token(token_type=TokenType.number, value='1')
     ]
     ast = parser.parse_ast(tokens)
     # Smoke test only: passes as long as parse_ast raises no exception.
Example #15
 def test_parse_simple_addition_ast(self):
     t0 = Token(token_type=TokenType.identifier, value='print')
     t1 = Token(token_type=TokenType.operator, value=':')
     t2 = Token(token_type=TokenType.number, value='4')
     t3 = Token(token_type=TokenType.operator, value='+')
     t4 = Token(token_type=TokenType.number, value='1')
     tokens = [t0, t1, t2, t3, t4]
     parser = Parser()
     ast = parser.parse_ast(tokens)
     n0 = ast.root
     self.assertEqual(n0.token, t0)
     self.assertEqual(get_child_tokens(n0), [t1])
     n1 = ast.root.children[0]
     self.assertEqual(n1.token, t1)
     self.assertEqual(get_child_tokens(n1), [t3])
     n2 = n1.children[0]
     self.assertEqual(n2.token, t3)
     self.assertEqual(get_child_tokens(n2), [t2, t4])
     n3 = n2.children[0]
     self.assertEqual(n3.token, t2)
     self.assertEqual(get_child_tokens(n3), [])
     n4 = n2.children[1]
     self.assertEqual(n4.token, t4)
     self.assertEqual(get_child_tokens(n4), [])
Example #16
 def read_keyword(self):
     lexeme = self.text[self.pos]
     while self.pos + 1 < self.len and self.text[self.pos + 1].isalnum():
         lexeme += self.next_char()
     if lexeme == 'if':
         return Token(Class.IF, lexeme)
     elif lexeme == 'else':
         return Token(Class.ELSE, lexeme)
     elif lexeme == 'while':
         return Token(Class.WHILE, lexeme)
     elif lexeme == 'for':
         return Token(Class.FOR, lexeme)
     elif lexeme == 'break':
         return Token(Class.BREAK, lexeme)
     elif lexeme == 'continue':
         return Token(Class.CONTINUE, lexeme)
     elif lexeme == 'return':
         return Token(Class.RETURN, lexeme)
     elif lexeme == 'int' or lexeme == 'char' or lexeme == 'void':
         return Token(Class.TYPE, lexeme)
     return Token(Class.ID, lexeme)
Example #17
    def createTokenFromString(self, inp):
        li = inp.split(self.elSplit)
        index = int(li[0])
        string = li[1]
        lemma = li[2]
        tag = li[3]
        gender = []

        if li[4] != "set()" and li[4] != "empty":
            gen = li[4].replace("{", "").replace("}", "").replace("'", "").replace(" ", "")
            gender = gen.split(",")

        number = []

        if li[5] != "set()" and li[5] != "empty":
            num = li[5].replace("{", "").replace("}", "").replace("'", "").replace(" ", "")
            number = num.split(",")

        poc = []

        if li[6] != "[]":
            pocs = li[6].replace("[", "").replace("]", "").replace("\n", "").replace(" ", "")
            pocs = pocs.split(",")
            for pc in pocs:
                poc.append(int(pc))

        mainIndex = li[7].replace("\n", "")

        return Token(index, string, lemma, tag, gender, number, poc, mainIndex)
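
Each replace chain above merely strips the braces, brackets, quotes, spaces, and newlines that repr() wraps around a set or list before splitting on commas. The same cleanup fits in one small helper, sketched here with hypothetical names:

def parse_repr_collection(field):
    # "{'m', 'f'}" -> ['m', 'f']; "set()", "empty" and "[]" -> []
    if field.strip() in ("set()", "empty", "[]"):
        return []
    cleaned = field.strip().strip("{}[]").replace("'", "").replace(" ", "")
    return cleaned.split(",")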
Example #18
from src.token_type import *
from src.token import Token

RESERVED_KEYWORDS = {
    'PROGRAM': Token(PROGRAM, 'PROGRAM'),
    'VAR': Token(VAR, 'VAR'),
    'DIV': Token(INTEGER_DIV, 'DIV'),
    'INTEGER': Token(INTEGER, 'INTEGER'),
    'REAL': Token(REAL, 'REAL'),
    'BEGIN': Token(BEGIN, 'BEGIN'),
    'END': Token(END, 'END'),
    'PROCEDURE': Token(PROCEDURE, 'PROCEDURE')
}
"""
The tokenizer that will break the code down into a stream of tokens.
"""


class Tokenizer:
    def __init__(self, text):
        self.text = text
        self.pos = 0
        self.current_char = self.text[self.pos]
        self.tokens = []

    def tokenizer_error(self, detail):
        raise Exception(f"[ERROR]: {detail} is not defined")

    def next(self):
        """
        Move the pos pointer by 1 position and set the current_char,
        or None once the end of the text is reached.
        """
        self.pos += 1
        # None signals end of input, matching the is-not-None checks used elsewhere
        self.current_char = self.text[self.pos] if self.pos < len(self.text) else None
Example #19
 def create_tokens(self):
     """
     Build a stream of tokens from the user input
     :return: the list of tokens
     """
     while self.current_char:
         # handle comments
         if self.current_char == "@":
             self.next()
             self.skip_comments()
         # handle whitespace
         elif self.current_char.isspace():
             self.skip_whitespaces()
         # handle keywords and ids
         elif self.current_char.isalpha():
             self.tokens.append(self.id_keywords())
         # handle numbers
         elif self.current_char.isdigit():
             self.tokens.append(self.multi_digit_number())
         # handle := assignment (advance() peeks at the character after ':')
         elif self.current_char == ":" and self.advance() == "=":
             self.next()
             self.next()
             self.tokens.append(Token(ASSIGN, ":="))
         # handle colon
         elif self.current_char == ":":
             self.next()
             self.tokens.append(Token(COLON, ":"))
         # handle comma
         elif self.current_char == ",":
             self.next()
             self.tokens.append(Token(COMMA, ","))
         # handle semicolon
         elif self.current_char == ";":
             self.next()
             self.tokens.append(Token(SEMI, ";"))
         # handle dot
         elif self.current_char == ".":
             self.next()
             self.tokens.append(Token(DOT, "."))
         # handle +, -, *, / and parentheses
         elif self.current_char == "+":
             self.next()
             self.tokens.append(Token(PLUS, "+"))
         elif self.current_char == "-":
             self.next()
             self.tokens.append(Token(MINUS, "-"))
         elif self.current_char == "*":
             self.next()
             self.tokens.append(Token(MULTIPLY, "*"))
         elif self.current_char == "/":
             self.next()
             self.tokens.append(Token(FLOAT_DIV, "/"))
         elif self.current_char == "(":
             self.next()
             self.tokens.append(Token(LPAREN, "("))
         elif self.current_char == ")":
             self.next()
             self.tokens.append(Token(RPAREN, ")"))
         else:
             self.tokenizer_error(self.current_char)
     self.tokens.append(Token(EOF, None))
     return self.tokens
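
The := branch above depends on one-character lookahead: advance() evidently peeks at the character after the colon without consuming it, and only on a match are both characters consumed by the two next() calls. A standalone sketch of the same peek-then-consume pattern:

def scan_colon(text, pos):
    # Peek one character ahead; consume two characters only for ':='.
    if pos + 1 < len(text) and text[pos + 1] == "=":
        return ("ASSIGN", ":="), pos + 2
    return ("COLON", ":"), pos + 1

# scan_colon(":= 1", 0) -> (('ASSIGN', ':='), 2)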
Example #20
 def next_token(self):
     self.read_space()
     curr = self.next_char()
     if curr is None:
         return Token(Class.EOF, curr)
     token = None
     if curr.isalpha():
         token = self.read_keyword()
     elif curr.isdigit():
         token = Token(Class.INT, self.read_int())
     elif curr == '\'':
         token = Token(Class.CHAR, self.read_char())
     elif curr == '"':
         token = Token(Class.STRING, self.read_string())
     elif curr == '+':
         token = Token(Class.PLUS, curr)
     elif curr == '-':
         token = Token(Class.MINUS, curr)
     elif curr == '*':
         token = Token(Class.STAR, curr)
     elif curr == '/':
         token = Token(Class.FWDSLASH, curr)
     elif curr == '%':
         token = Token(Class.PERCENT, curr)
     elif curr == '&':
         curr = self.next_char()
         if curr == '&':
             token = Token(Class.AND, '&&')
         else:
             token = Token(Class.ADDRESS, '&')
             self.pos -= 1
     elif curr == '|':
         curr = self.next_char()
         if curr == '|':
             token = Token(Class.OR, '||')
         else:
             self.die(curr)
     elif curr == '!':
         curr = self.next_char()
         if curr == '=':
             token = Token(Class.NEQ, '!=')
         else:
             token = Token(Class.NOT, '!')
             self.pos -= 1
     elif curr == '=':
         curr = self.next_char()
         if curr == '=':
             token = Token(Class.EQ, '==')
         else:
             token = Token(Class.ASSIGN, '=')
             self.pos -= 1
     elif curr == '<':
         curr = self.next_char()
         if curr == '=':
             token = Token(Class.LTE, '<=')
         else:
             token = Token(Class.LT, '<')
             self.pos -= 1
     elif curr == '>':
         curr = self.next_char()
         if curr == '=':
             token = Token(Class.GTE, '>=')
         else:
             token = Token(Class.GT, '>')
             self.pos -= 1
     elif curr == '(':
         token = Token(Class.LPAREN, curr)
     elif curr == ')':
         token = Token(Class.RPAREN, curr)
     elif curr == '[':
         token = Token(Class.LBRACKET, curr)
     elif curr == ']':
         token = Token(Class.RBRACKET, curr)
     elif curr == '{':
         token = Token(Class.LBRACE, curr)
     elif curr == '}':
         token = Token(Class.RBRACE, curr)
     elif curr == ';':
         token = Token(Class.SEMICOLON, curr)
     elif curr == ',':
         token = Token(Class.COMMA, curr)
     else:
         self.die(curr)
     return token
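
Two-character operators here are recognized by reading one character too far and pushing it back (self.pos -= 1) when the pair does not match. An alternative, table-driven sketch of the same maximal-munch idea, using plain strings instead of the Class enum:

TWO_CHAR = {"&&": "AND", "||": "OR", "!=": "NEQ", "==": "EQ", "<=": "LTE", ">=": "GTE"}
ONE_CHAR = {"&": "ADDRESS", "!": "NOT", "=": "ASSIGN", "<": "LT", ">": "GT"}

def scan_operator(text, pos):
    # Prefer the longest match: try the two-character pair first.
    pair = text[pos:pos + 2]
    if pair in TWO_CHAR:
        return (TWO_CHAR[pair], pair), pos + 2
    ch = text[pos]
    if ch in ONE_CHAR:
        return (ONE_CHAR[ch], ch), pos + 1
    raise ValueError(f"unexpected character {ch!r}")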
Example #21
    def parse(self):
        self.parseStack.push("$")
        self.parseStack.push(NonTerminal.Program)
        while self.parseStack.peek() != "$":
            A = self.parseStack.peek()
            if self.scanner.peek() == "EOF":
                t = "EOF"
                tVal = "EOF"
            else:
                t = self.scanner.peek().getType()
                tVal = self.scanner.peek().getValue()
            # print("\nSemantic Stack:")
            # for node in self.semanticStack:
            #     print(type(node))
            # print("Top of Parse Stack:", A)
            # print("Next token:", t, "(", tVal, ")")

            if isinstance(A, TokenType):
                if A == t:
                    self.last = tVal
                    self.parseStack.pop()
                    self.scanner.next()
                else:
                    # If print is declared as a function identifier, recast print_statement token as identifier token
                    if not (A == TokenType.identifier
                            and t == TokenType.print_statement):
                        error_msg = "Parsing error: Expected {} but found {}"
                        raise ParseError(error_msg.format(A, t))
                    else:
                        self.scanner.replaceNext(
                            Token(TokenType.identifier, "print"))
            elif isinstance(A, NonTerminal):
                if (A, t) in parse_table:
                    self.parseStack.pop()
                    if "ε" in parse_table[(
                            A, t)]:  # rule is ε, push nothing onto stack
                        continue
                    else:
                        reversedRule = parse_table[(A, t)].copy()
                        reversedRule.reverse()
                        for y in reversedRule:
                            self.parseStack.push(y)
                else:
                    # If print is declared as a function identifier, recast print_statement token as identifier token
                    if not ((A == NonTerminal.Expr or A == NonTerminal.Term
                             or A == NonTerminal.Factor)
                            and t == TokenType.print_statement):
                        error_msg = "Parsing Error: No transition for {} from {}"
                        raise ParseError(error_msg.format(A, t))
                    else:
                        self.scanner.replaceNext(
                            Token(TokenType.identifier, "print"))
            elif issubclass(A, AST.ASTnode):
                self.parseStack.pop()
                self.semanticStack.push(A(self.last, self.semanticStack))
            else:
                error_msg = "Parsing Error: An unidentified object is on the stack: {}"
                raise ParseError(error_msg.format(A))

        # end of loop, program threw no errors
        if self.scanner.peek() != "EOF":
            error_msg = "Parsing Error: Code found after end of program."
            raise ParseError(error_msg)
        else:
            return self.semanticStack.pop()
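
The loop above is a standard table-driven LL(1) parse: a terminal on top of the stack must match the lookahead, a nonterminal is expanded via parse_table, and a rule's right-hand side is pushed in reverse so its leftmost symbol ends up on top. A simplified sketch of the table shape this implies, using plain strings rather than the project's NonTerminal and TokenType enums (the entries are hypothetical):

parse_table = {
    ("Expr", "number"): ["Term", "ExprTail"],
    ("ExprTail", "+"): ["+", "Term", "ExprTail"],
    ("ExprTail", "EOF"): ["ε"],  # an ε rule pushes nothing onto the stack
}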
Example #22
    def update_token(self):
        token = Token()
        recorder = Recorder()

        keys = token.key_submit()
        recorder.dir_json(keys)
Example #23
	def read_keyword(self):
		lexeme = self.text[self.pos]
		while self.pos + 1 < self.len and self.is_keyword(self.text[self.pos + 1]):
			lexeme += self.next_char()
		if lexeme == 'div':
			return Token(Class.DIV, lexeme)
		elif lexeme == 'mod':
			return Token(Class.MOD, lexeme)
		elif lexeme == 'not':
			return Token(Class.NOT, lexeme)
		elif lexeme == 'or':
			return Token(Class.OR, lexeme)
		elif lexeme == 'xor':
			return Token(Class.XOR, lexeme)  # assumes the Class enum defines XOR
		elif lexeme == 'and':
			return Token(Class.AND, lexeme)
		elif lexeme == 'begin':
			return Token(Class.BEGIN, lexeme)
		elif lexeme == 'end':
			return Token(Class.END, lexeme)
		elif lexeme == 'if':
			return Token(Class.IF, lexeme)
		elif lexeme == 'else':
			return Token(Class.ELSE, lexeme)
		elif lexeme == 'then':
			return Token(Class.THEN, lexeme)
		elif lexeme == 'for':
			return Token(Class.FOR, lexeme)
		elif lexeme == 'to':
			return Token(Class.TO, lexeme)
		elif lexeme == 'downto':
			return Token(Class.DOWNTO, lexeme)
		elif lexeme == 'do':
			return Token(Class.DO, lexeme)
		elif lexeme == 'while':
			return Token(Class.WHILE, lexeme)
		elif lexeme == 'break':
			return Token(Class.BREAK, lexeme)
		elif lexeme == 'continue':
			return Token(Class.CONTINUE, lexeme)
		elif lexeme == 'repeat':
			return Token(Class.REPEAT, lexeme)
		elif lexeme == 'until':
			return Token(Class.UNTIL, lexeme)  # assumes the Class enum spells this UNTIL
		elif lexeme == 'var':
			return Token(Class.VAR, lexeme)
		elif lexeme == 'of':
			return Token(Class.OF, lexeme)
		elif lexeme == 'procedure':
			return Token(Class.PROCEDURE, lexeme)
		elif lexeme == 'function':
			return Token(Class.FUNCTION, lexeme)
		elif lexeme in ('integer', 'char', 'string', 'real', 'boolean'):
			return Token(Class.TYPE, lexeme)
		elif lexeme == 'array':
			return Token(Class.Array, lexeme)
		elif lexeme == 'exit':
			return Token(Class.Exit, lexeme)
		elif lexeme == 'true':
			return Token(Class.BOOL, True)
		elif lexeme == 'false':
			return Token(Class.BOOL, False)
		return Token(Class.ID, lexeme)
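
The comparison chain above is equivalent to a single table lookup with an ID fallback, in the spirit of the RESERVED_KEYWORDS dict in Example #18. A trimmed standalone sketch using plain strings in place of the Class enum:

KEYWORDS = {"if": "IF", "else": "ELSE", "while": "WHILE", "begin": "BEGIN", "end": "END"}
TYPES = ("integer", "char", "string", "real", "boolean")

def classify_keyword(lexeme):
    if lexeme in TYPES:
        return ("TYPE", lexeme)
    return (KEYWORDS.get(lexeme, "ID"), lexeme)

# classify_keyword("while") -> ('WHILE', 'while'); classify_keyword("foo") -> ('ID', 'foo')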
Example #24
	def next_token(self):
		self.read_space()
		curr = self.next_char()
		if curr is None:
			return Token(Class.EOF, curr)
		elif curr.isdigit():
			value = self.read_int()
			curr = self.next_char()
			if curr != '.':
				self.pos -= 1
				return Token(Class.INT, value)
			curr = self.next_char()
			if not curr.isdigit():
				self.pos -= 2
				return Token(Class.INT, value)
			mantissa = self.read_int()
			value = str(value) + '.' + str(mantissa)
			value = float(value)
			return Token(Class.Float, value)
		elif self.is_keyword(curr):
			return self.read_keyword()
		elif curr == '\'':
			text = self.read_char()
			if len(text) > 1:
				return Token(Class.STRING, text)
			if len(text) == 1:
				return Token(Class.CHAR, text[0])
			return Token(Class.CHAR,'')
		elif curr == '"':
			return Token(Class.STRING, self.read_string())
		elif curr == ':':
			curr = self.next_char()
			if curr == '=':
				return Token(Class.ASSIGN, ':=')
			self.pos -= 1
			return Token(Class.Colon, ':')
		elif curr == '+':
			return Token(Class.PLUS, curr)
		elif curr == '-':
			return Token(Class.MINUS, curr)
		elif curr == '*':
			return Token(Class.STAR, curr)
		elif curr == '/':
			return Token(Class.FWDSLASH, curr)
		elif curr == '=':
			return Token(Class.EQ, curr)
		elif curr == '<':
			curr = self.next_char()
			if curr == '>':
				return Token(Class.NEQ, '<>')
			elif curr == '=':
				return Token(Class.LTE, '<=')
			self.pos -= 1
			return Token(Class.LT, '<')
		elif curr == '>':
			curr = self.next_char()
			if curr == '=':
				return Token(Class.GTE, '>=')
			self.pos -= 1
			return Token(Class.GT, '>')
		elif curr == '(':
			return Token(Class.LPAREN, curr)
		elif curr == ')':
			return Token(Class.RPAREN, curr)
		elif curr == '[':
			return Token(Class.LBRACKET, curr)
		elif curr == ']':
			return Token(Class.RBRACKET, curr)
		elif curr == ';':
			return Token(Class.SEMICOLON, curr)
		elif curr == ',':
			return Token(Class.COMMA, curr)
		elif curr == '.':
			curr = self.next_char()
			if curr == '.':
				return Token(Class.DOTDOT, '..')
			self.pos -= 1
			return Token(Class.DOT, '.')
		self.die(curr)
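
The digit branch at the top backs up when a '.' is not followed by another digit, so Pascal ranges like 3..5 lex as INT, DOTDOT, INT rather than a malformed float. A standalone sketch of that disambiguation:

def scan_number(text, pos):
    start = pos
    while pos < len(text) and text[pos].isdigit():
        pos += 1
    # A '.' extends the number only if a digit follows; '3..5' stays an int.
    if pos + 1 < len(text) and text[pos] == "." and text[pos + 1].isdigit():
        pos += 1
        while pos < len(text) and text[pos].isdigit():
            pos += 1
        return ("FLOAT", float(text[start:pos])), pos
    return ("INT", int(text[start:pos])), pos

# scan_number("3..5", 0) -> (('INT', 3), 1)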