  def testConsumeIntegers(self):
    # This test only tests the failures in the integer parsing methods as well
    # as the '0' special cases.
    int64_max = (1 << 63) - 1
    uint32_max = (1 << 32) - 1
    text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
    self.assertEqual(-1, tokenizer.ConsumeInt32())

    self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
    self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())

    self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
    self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
    self.assertTrue(tokenizer.AtEnd())

    text = '-0 -0 0 0'
    tokenizer = text_format._Tokenizer(text)
    self.assertEqual(0, tokenizer.ConsumeUint32())
    self.assertEqual(0, tokenizer.ConsumeUint64())
    self.assertEqual(0, tokenizer.ConsumeUint32())
    self.assertEqual(0, tokenizer.ConsumeUint64())
    self.assertTrue(tokenizer.AtEnd())
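
  # A note on the boundary values above: each Consume* method accepts a token
  # only if it fits the target type's range, so ConsumeInt32 takes -1,
  # ConsumeInt64 takes uint32_max + 1, and ConsumeUint64 takes int64_max + 1,
  # while the narrower methods raise ParseError on the same tokens. A minimal
  # sketch of that kind of bounds check (illustrative only, not the
  # tokenizer's actual implementation):
  #
  #   def _CheckUint32(value):
  #     if not 0 <= value <= (1 << 32) - 1:
  #       raise text_format.ParseError('Value out of range: %d' % value)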

  def testConsumeByteString(self):
    text = '"string1\''
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

    text = 'string1"'
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

    text = '\n"\\xt"'
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

    text = '\n"\\"'
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

    text = '\n"\\x"'
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
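
  # Each failing case above is a malformed byte string literal: an
  # unterminated string, a bare closing quote, '\\x' with no following hex
  # digits, and a dangling backslash. The same malformed literals should also
  # fail through the module-level entry points; a hedged sketch (assuming a
  # message type with a bytes field; 'some_pb2.SomeMessage' and 'bytes_field'
  # are hypothetical names):
  #
  #   message = some_pb2.SomeMessage()
  #   self.assertRaises(text_format.ParseError, text_format.Merge,
  #                     'bytes_field: "\\x"', message)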

  def testConsumeBool(self):
    text = 'not-a-bool'
    tokenizer = text_format._Tokenizer(text)
    self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
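
  # ConsumeBool accepts the spellings exercised in testSimpleTokenCases below
  # ('true'/'t'/'1' and 'false'/'f'/'0'); any other token, such as
  # 'not-a-bool', raises ParseError.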

  def testSimpleTokenCases(self):
    text = ('identifier1:"string1"\n     \n\n'
            'identifier2 : \n \n123  \n  identifier3 :\'string\'\n'
            'identifier_4 : 1.1e+2 id5:-0.23 id6:\'aaaa\\\'bbbb\'\n'
            'id7 : "aa\\"bb"\n\n\n\n id8: {a:inf b:-inf c:true d:false}\n'
            'id9: 22 id10: -111111111111111111 id11: -22\n'
            'id12: 2222222222222222222 id13: 1.23456f id14: 1.2e+2f '
            'false_bool:  0 true_bool:t \n true_bool1:  1 false_bool1:f ')
    tokenizer = text_format._Tokenizer(text)
    methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
               ':',
               (tokenizer.ConsumeString, 'string1'),
               (tokenizer.ConsumeIdentifier, 'identifier2'),
               ':',
               (tokenizer.ConsumeInt32, 123),
               (tokenizer.ConsumeIdentifier, 'identifier3'),
               ':',
               (tokenizer.ConsumeString, 'string'),
               (tokenizer.ConsumeIdentifier, 'identifier_4'),
               ':',
               (tokenizer.ConsumeFloat, 1.1e+2),
               (tokenizer.ConsumeIdentifier, 'id5'),
               ':',
               (tokenizer.ConsumeFloat, -0.23),
               (tokenizer.ConsumeIdentifier, 'id6'),
               ':',
               (tokenizer.ConsumeString, 'aaaa\'bbbb'),
               (tokenizer.ConsumeIdentifier, 'id7'),
               ':',
               (tokenizer.ConsumeString, 'aa\"bb'),
               (tokenizer.ConsumeIdentifier, 'id8'),
               ':',
               '{',
               (tokenizer.ConsumeIdentifier, 'a'),
               ':',
               (tokenizer.ConsumeFloat, float('inf')),
               (tokenizer.ConsumeIdentifier, 'b'),
               ':',
               (tokenizer.ConsumeFloat, -float('inf')),
               (tokenizer.ConsumeIdentifier, 'c'),
               ':',
               (tokenizer.ConsumeBool, True),
               (tokenizer.ConsumeIdentifier, 'd'),
               ':',
               (tokenizer.ConsumeBool, False),
               '}',
               (tokenizer.ConsumeIdentifier, 'id9'),
               ':',
               (tokenizer.ConsumeUint32, 22),
               (tokenizer.ConsumeIdentifier, 'id10'),
               ':',
               (tokenizer.ConsumeInt64, -111111111111111111),
               (tokenizer.ConsumeIdentifier, 'id11'),
               ':',
               (tokenizer.ConsumeInt32, -22),
               (tokenizer.ConsumeIdentifier, 'id12'),
               ':',
               (tokenizer.ConsumeUint64, 2222222222222222222),
               (tokenizer.ConsumeIdentifier, 'id13'),
               ':',
               (tokenizer.ConsumeFloat, 1.23456),
               (tokenizer.ConsumeIdentifier, 'id14'),
               ':',
               (tokenizer.ConsumeFloat, 1.2e+2),
               (tokenizer.ConsumeIdentifier, 'false_bool'),
               ':',
               (tokenizer.ConsumeBool, False),
               (tokenizer.ConsumeIdentifier, 'true_bool'),
               ':',
               (tokenizer.ConsumeBool, True),
               (tokenizer.ConsumeIdentifier, 'true_bool1'),
               ':',
               (tokenizer.ConsumeBool, True),
               (tokenizer.ConsumeIdentifier, 'false_bool1'),
               ':',
               (tokenizer.ConsumeBool, False)]

    i = 0
    while not tokenizer.AtEnd():
      m = methods[i]
      if isinstance(m, str):
        token = tokenizer.token
        self.assertEqual(token, m)
        tokenizer.NextToken()
      else:
        self.assertEqual(m[1], m[0]())
      i += 1
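
  # The driver loop interleaves two kinds of entries: bare strings are
  # matched against the current token and skipped with NextToken(), while
  # (method, expected_value) pairs both consume and type-check a value. A
  # standalone sketch of the same pattern on a tiny input (assuming the same
  # _Tokenizer API as above; the input string is made up for illustration):
  #
  #   tokenizer = text_format._Tokenizer('id: 42')
  #   self.assertEqual('id', tokenizer.ConsumeIdentifier())
  #   self.assertEqual(':', tokenizer.token)
  #   tokenizer.NextToken()
  #   self.assertEqual(42, tokenizer.ConsumeInt32())
  #   self.assertTrue(tokenizer.AtEnd())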