Example #1
	def _testParsing(self, st, *L):
		'''Tests that parsing a particular string all at once will return the given
		sequence of tokens. The Wait token at the end is implicit.'''
		# TODO: Test parsing it character-by-character too!
		#print 'Testing '+repr(st)
		L = self._collapseText(list(L)) + [WaitToken()]
		p = Tokenizer()
		p.queueData(st)
		resultToks = []
		while True:
			tok = p.getNextToken()
			resultToks.append(tok)
			if isinstance(tok, WaitToken):
				break
		resultToks = self._collapseText(resultToks)
		self.assertEqual(len(L), len(resultToks), "Error parsing '%s': Expected token list %s, got %s" % \
							(repr(st), repr(L), repr(resultToks)))
		for tok, result in zip(L, resultToks):
			self.assertEqual(tok, result, \
								"Error parsing '%s': Expected %s but got %s" % \
								(repr(st), repr(tok), repr(result)))
		self.assertEqual(p.getNextToken(), WaitToken(),
							'When waiting, repeated calls to getNextToken() should '+\
							'continue to return WaitTokens')
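
A hedged usage sketch of this helper: the TextToken('hi') and NewlineToken() constructor signatures are assumptions inferred from how these tokens are used elsewhere in this snippet, not confirmed API.

	# Hypothetical test using _testParsing; the trailing WaitToken is implicit.
	def test_simpleLine(self):
		self._testParsing('hi\n', TextToken('hi'), NewlineToken())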
Example #2
class LineParser(object):
	"""
	Assembles tokens into lines composed of TextChunks.
	This takes care of giving each TextChunk a correct TextStyle
	based on the flags present in the coloring tokens.
	"""

	def __init__(self):
		self._tokenizer = Tokenizer()
		self._incompleteLine = []
		self._completeLines = []
		self._reset()

	def _reset(self):
		"""
		Resets text style info.
		"""
		self._style = TextStyle()


	def getLine(self):
		"""
		Returns the next complete line, or None if no line is ready yet.
		A line is a list of Chunks; the newline character is not
		included in the last chunk.
		"""
		if self._completeLines:
			return self._completeLines.pop(0)
		else:
			return None
		
	def queueData(self, data):
		"""
		Queues a chunk of data and parses it for line retrieval later.
		"""
		assert isinstance(data, str), "data must be a string"
		self._tokenizer.queueData(data)
		# Tokenize now...
		while True:
			tok = self._tokenizer.getNextToken()
			#print tok.__class__, self._incompleteLine
			if isinstance(tok, WaitToken):
				break
			elif isinstance(tok, TextToken):
				self._incompleteLine.append(TextChunk(tok.text, self._style))
			elif isinstance(tok, AnsiCodeToken):
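				# Standard ANSI SGR codes: 0 resets all attributes, 1 enables bold,
				# and 30-37 select the foreground color (black through white).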
				if tok.number == 0:
					self._reset()
				elif tok.number == 1:
					self._style = self._style.clone(bold=True)
				elif 30 <= tok.number <= 37:
					colorNum = tok.number - 30
					self._style = self._style.clone(color=AnsiColor(colorNum))
			elif isinstance(tok, TA2Token):
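				# The ATT_SET bit decides whether the flagged attributes below are
				# being turned on or off; the remaining bits name the attributes.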
				flags = tok.getFlags()
				setting = (flags & ATT_SET)!=0
				if flags & ATT_BOLD:
					self._style = self._style.clone(bold=setting)
				if flags & ATT_ITALIC:
					self._style = self._style.clone(italic=setting)
				if flags & ATT_UNDERLINE:
					self._style = self._style.clone(underline=setting)
				if flags & ATT_STRIKETHROUGH:
					self._style = self._style.clone(strikethrough=setting)
				if flags & ATT_COLOR:
					if setting:
						self._style = \
							self._style.clone(color=TA2Color('#%06x'%tok.getColor()))
					else:
						self._style = \
							self._style.clone(color=None)
				# TODO: Actually test this
				# TODO: This will be really slow if someone were to post a line
				#		in Japanese, for example -- every single character
				#		would result in a discrete text chunk!
				#if flags & ATT_UNICODE:
				#	text = chr(tok.getUnicode())
				#	#self._incompleteLine.append(TextChunk(text, self._style))
			elif isinstance(tok, NewlineToken):
				self._completeLines.append(self._incompleteLine)
				self._incompleteLine = []
			elif isinstance(tok, XmlToken):
				self._incompleteLine.append(XmlChunk(tok.xml))
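
A minimal usage sketch of LineParser, assuming the project's Tokenizer emits AnsiCodeTokens for standard '\x1b[...m' escape sequences (an assumption not confirmed by this snippet):

	# Hypothetical driver code; chunk contents are whatever TextChunk stores
	# from the TextChunk(tok.text, self._style) call above.
	parser = LineParser()
	parser.queueData('\x1b[31mhello\x1b[0m world\n')
	line = parser.getLine()
	while line is not None:
		for chunk in line:
			print chunk		# each chunk carries its text plus the TextStyle in effect
		line = parser.getLine()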