@nose.tools.raises(AttributeError)
def test_token_assign_text():
    """Assigning to ``Token.text`` must raise :exc:`AttributeError`.

    NOTE(review): assumes Token is a frozen namedtuple-style class that
    rejects attribute assignment, as the upstream latexcodec tests do.
    Without the ``raises`` decorator the AttributeError escapes and the
    test errors out instead of verifying the immutability contract.
    """
    t = Token()
    t.text = 'test'
@nose.tools.raises(AttributeError)
def test_token_assign_other():
    """Setting an arbitrary attribute on a Token must raise AttributeError.

    NOTE(review): assumes Token uses ``__slots__``/namedtuple semantics so
    unknown attributes cannot be created; the decorator turns the expected
    AttributeError into a pass instead of a test error.
    """
    t = Token()
    t.blabla = 'test'
# NOTE(review): this is a byte-for-byte duplicate of the earlier
# ``test_token_assign_other`` definition; Python keeps only the last
# binding, so one of the two copies should be deleted.
@nose.tools.raises(AttributeError)
def test_token_assign_other():
    """Setting an arbitrary attribute on a Token must raise AttributeError.

    NOTE(review): assumes Token uses ``__slots__``/namedtuple semantics so
    unknown attributes cannot be created — confirm against the Token class.
    """
    t = Token()
    t.blabla = 'test'
@nose.tools.raises(AttributeError)
def test_token_assign_name():
    """Assigning to ``Token.name`` must raise :exc:`AttributeError`.

    NOTE(review): assumes Token is a frozen namedtuple-style class that
    rejects attribute assignment, as the upstream latexcodec tests do.
    The decorator makes the expected AttributeError count as a pass.
    """
    t = Token()
    t.name = 'test'
def invalid_token_test():
    """decode() must reject tokens whose name is not a recognized type."""
    decoder = LatexIncrementalDecoder()

    def emit_invalid_tokens(bytes_, final):
        # piggyback an implementation which results in invalid tokens
        return [Token('**invalid**', bytes_)]

    decoder.get_raw_tokens = emit_invalid_tokens
    nose.tools.assert_raises(AssertionError, lambda: decoder.decode(b'hello'))
# NOTE(review): this is a byte-for-byte duplicate of the earlier
# ``test_token_assign_text`` definition; Python keeps only the last
# binding, so one of the two copies should be deleted.
@nose.tools.raises(AttributeError)
def test_token_assign_text():
    """Assigning to ``Token.text`` must raise :exc:`AttributeError`.

    NOTE(review): assumes Token is a frozen namedtuple-style class that
    rejects attribute assignment — confirm against the Token class.
    """
    t = Token()
    t.text = 'test'
# NOTE(review): this is a byte-for-byte duplicate of the earlier
# ``test_token_assign_name`` definition; Python keeps only the last
# binding, so one of the two copies should be deleted.
@nose.tools.raises(AttributeError)
def test_token_assign_name():
    """Assigning to ``Token.name`` must raise :exc:`AttributeError`.

    NOTE(review): assumes Token is a frozen namedtuple-style class that
    rejects attribute assignment — confirm against the Token class.
    """
    t = Token()
    t.name = 'test'
def test_token_create_with_args():
    """A Token built with positional arguments stores them as name and text."""
    token = Token('hello', b'world')
    nose.tools.assert_equal(token.name, 'hello')
    nose.tools.assert_equal(token.text, b'world')
def test_token_create():
    """A Token built with no arguments carries the default name and text."""
    token = Token()
    nose.tools.assert_equal(token.name, 'unknown')
    nose.tools.assert_equal(token.text, b'')