Example #1
	def GetSpeech(self, useTextAreas=False):
		speech = ''
		print 'Speak now'
		util.beep()
		if useTextAreas and len(self.textAreas) > 0:
			# constrain recognition to the phrases we already know about
			areaText = [a.text for a in self.textAreas]
			speech = self.speech.listen(phrases=areaText)
		else:
			speech = self.speech.listen()
		if speech is not None and speech != '':
			print 'You said: %s' % speech
			# search the OCR'd text areas for a match (areaText holds plain
			# strings, so iterate the area objects themselves)
			for area in self.textAreas:
				if area.text.startswith(speech):
					self.speech.Say('Locating target %s' % area.link.text)
					self.directionMode = True
					# NOTE: with directionMode already set, the second argument
					# below always evaluates to False
					self.tracker.setTarget(util.Centroid(area.link.box), area.link.text and not self.directionMode)
					break
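
A minimal, self-contained sketch of the phrase-constrained listen pattern above; FakeSpeech is a stand-in for speechManager.SpeechManager, whose real API is not part of this excerpt.

class FakeSpeech(object):
	def listen(self, phrases=None):
		# a real recognizer would capture audio here; returning the first
		# candidate is enough to exercise the matching control flow
		return phrases[0] if phrases else ''

speech = FakeSpeech()
areaText = ['Introduction', 'Methods', 'Results']
spoken = speech.listen(phrases=areaText)  # constrained recognition
matches = [t for t in areaText if t.startswith(spoken)]
print('You said: %s -> %d match(es)' % (spoken, len(matches)))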
Example #2

        # one histogram per Genant fracture grade (0-3); each metadata dict
        # carries 'genant_0'..'genant_3' vertebra counts
        for grade in range(4):
            plt.hist([m['genant_%d' % grade] for m in all_metadata], bins=25)
            plt.legend(['#vertebra with genant %d' % grade])
            plt.savefig('./img/statistics/genant_{0}_{1}'.format(grade, ds_name))
            plt.clf()


if __name__ == '__main__':
    dummy_computation(memory_control)
    # noinspection PyBroadException
    try:
        generate_statistics()
    except Exception:
        print_exc_plus()
        exit(-1)
    finally:
        frequency = 2000
        duration = 500
        util.beep(frequency, duration)
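
The loop above assumes all_metadata and ds_name come from the enclosing generate_statistics(); a self-contained sketch with synthetic per-vertebra counts (made up for illustration) would be:

import random
import matplotlib
matplotlib.use('Agg')  # headless backend, so saving works without a display
import matplotlib.pyplot as plt

ds_name = 'demo'
all_metadata = [{'genant_%d' % g: random.randint(0, 10) for g in range(4)}
                for _ in range(200)]
for grade in range(4):
    plt.hist([m['genant_%d' % grade] for m in all_metadata], bins=25)
    plt.legend(['#vertebra with genant %d' % grade])
    plt.savefig('genant_%d_%s.png' % (grade, ds_name))
    plt.clf()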
Example #3
def main():
	global ocrResults
	logger.debug('Started')	
	speech = speechManager.SpeechManager()
	
	# create the images we need
	imSize = (camSmall[1],camSmall[0]) if rotate == -90 or rotate == 90 else (camSmall[0],camSmall[1])
	
	imgCopy = cv.CreateImage(imSize, vidDepth, 3) # a rotated copy
	imgYCC = cv.CreateImage(imSize, vidDepth, 3) # ycc for skin
	imgSkin = cv.CreateImage(imSize, vidDepth, 1)
	imgFG = cv.CreateImage(imSize, vidDepth, 1)
	imgHSV = cv.CreateImage(imSize, vidDepth, 3)
	imgGray = cv.CreateImage(imSize, vidDepth, 1)
	imgEdge = cv.CreateImage(imSize, vidDepth, 1)
	imgFinger = cv.CreateImage(imSize, vidDepth, 1)
	
	# big images
	bigRot = (bigClip[3],bigClip[2]) if rotate == -90 or rotate == 90 else (bigClip[2],bigClip[3])
	imgBigCropped = cv.CreateImage((bigClip[2],bigClip[3]), vidDepth, 3)
	imgBigRotated = cv.CreateImage(bigRot, vidDepth, 3)
	
	# set up cam
	camera = cv.CaptureFromCAM(0)
	cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, camSmall[X])
	cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, camSmall[Y])
	# SetResolution(camSmall)
	
	img = cv.QueryFrame(camera)
	util.RotateImage(img, imgCopy, rotate)
	
	cv.NamedWindow(windowTitle, 1) 
	counter = 0
	
	# before we do anything, create our background model
	bgModel = bg2.BackgroundModel(imSize[0], imSize[1], fitThreshold=fitThreshold, shadowColorDiff=shadowColorDiff, shadowMaxDarken=shadowMaxDarken)
		 
	for i in range(0, trainImages):
		smFrame = cv.QueryFrame(camera)
		util.RotateImage(smFrame, imgCopy, rotate)
		bg2.FindBackground(imgCopy, imgFG, bgModel)
		cv.ShowImage(windowTitle, imgFG)
		cv.WaitKey(10)
	
	# keep state here
	documentOnTable = False
	doingOcr = False
	ocrDone = False
	accumulator = 0
	documentCorners = [] # corners in small view
	aspectRatio = None
	transform = None
	transformInv = None
	docWidth = None
	docHeight = None
	bigBoxes = []
	smallBoxes = []
	overlays = []
	overlayIndex = [] # map overlay to box index
	tracking = False
	trackingTarget = None
	finger = None
	lastTouched = None
	colorMode = False
	lastColor = None
	tableMode = False
	topRow = []
	leftRow = []
	showBG = False
	showSkin = False
	
	dicto = dict.DictionaryManager('dict/wordfreq.txt','dict/userdict.txt')
	
	# main loop
	while True:
		key = cv.WaitKey(10) # key handler
		if key == 27: break # Esc quits
		char = None if key == -1 else chr(key & 255) # mask modifier bits off the keycode
		
		# get image
		img = cv.QueryFrame(camera)
		util.RotateImage(img, imgCopy, rotate)
		util.GetYCC(imgCopy,imgYCC)
		bg2.FindBackground(imgCopy, imgFG, bgModel, update=0,findShadow=findShadow)
		# for i in range(0, erodeIterations):
		# 	cv.Erode(imgFG,imgFG)
		for i in range(0, erodeIterations):
			cv.Dilate(imgFG,imgFG)
		element = cv.CreateStructuringElementEx(3,3,1,1,cv.CV_SHAPE_RECT)

		bg2.FindSkin(imgYCC,imgSkin,doCleanup=False,showMaybeSkin=True)
		
		if ocrDone:
			cv.And(imgFG,imgSkin,imgFG)
			#cv.MorphologyEx(imgFG,imgFG,None,element,cv.CV_MOP_CLOSE, erodeIterations)
			#cv.MorphologyEx(imgFG,imgFG,None,element,cv.CV_MOP_OPEN, erodeIterations)
		
		
		### STEP ONE: DETECT DOCUMENT
		# now, start looking for documents
		if not doingOcr and not documentOnTable:
			if len(sys.argv) > 1:
				smallBoxes, documentCorners, aspectRatio, ocrResults, docWidth, docHeight, transform, transformInv = LoadState(sys.argv[1])
				doingOcr = True
				ocrDone = True
				documentOnTable = True
				print 'Loaded'
			
			util.GetGrayscale(imgCopy, imgGray)
			util.GetEdge(frame=imgGray, edge=imgEdge)
			rect = util.FindLargestRectangle(imgFG, imgGray)
			if len(rect) == 4 and util.BoundingRectArea(rect) > 25000:
				accumulator += 1
				if accumulator >= 60:
					accumulator = 0
					speech.Say('Document detected')
					documentOnTable = True
					documentCorners = rect
			else:
				accumulator = 0
		# if we have a document, wait for it to go away
		elif not doingOcr and documentOnTable:
			util.GetGrayscale(imgCopy, imgGray)
			util.GetEdge(frame=imgGray, edge=imgEdge)
			rect = util.FindLargestRectangle(imgFG, imgGray)
			if len(rect) != 4 or util.BoundingRectArea(rect) < 25000:
				accumulator += 1
				if accumulator >= 30:
					speech.Say('Document removed')
					documentOnTable = False
					documentCorners = []
					ocrDone = False
					accumulator = 0
		
		### STEP TWO: IF THE USER PRESSES THE SPACE BAR, DO OCR
		if documentOnTable and not doingOcr and not ocrDone and char == ' ':
			speech.Say('Starting OCR')
			char = None # consume the keypress so later handlers ignore it
			
			aspectRatio = GetAspectRatio(documentCorners)
			imgRect, transform, transformInv = CreateTransform(documentCorners, imgCopy, aspectRatio)

			# we'll use this later, for overlays
			docWidth = imgRect.width
			docHeight = imgRect.height
						
			accumulator = 0
			doingOcr = True
			timestamp = int(time.time()*1000)

			# save small image
			cv.SaveImage('logs/small-%d.png' % timestamp, imgCopy)
			cv.SaveImage('logs/smallrect-%d.png' % timestamp, imgRect)
			
			# get big image
			print 'Getting big image'
			cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, camLarge[X])
			cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, camLarge[Y])
			
			# get a couple frames to warm up
			for i in range(0,10):
				bigFrame = cv.QueryFrame(camera)
				cv.WaitKey(10)
			
			bigFrame = cv.QueryFrame(camera)
			cv.SaveImage('logs/big-%d.png' % timestamp, bigFrame)
			
			# crop big image
			cropRegion = cv.GetSubRect(bigFrame, bigClip)
			cv.Copy(cropRegion, imgBigCropped)
			cv.SaveImage('logs/bigcropped-%d.png' % timestamp, imgBigCropped)
			
			# rotate big image
			util.RotateImage(imgBigCropped, imgBigRotated, rotate)
			cv.SaveImage('logs/bigrotated-%d.png' % timestamp, imgBigRotated)
			
			# rectify big image
			scaledCorners = [(p[0]*bigScale,p[1]*bigScale) for p in documentCorners]
			bigRectified, bigTransform, bigTransformInv = CreateTransform(scaledCorners, imgBigRotated, aspectRatio)
			cv.SaveImage('logs/bigrectified-%d.png' % timestamp, bigRectified)
			
			# get text regions
			print 'Getting text regions'
			ocr.ClearOCRTempFiles()
			bigBoxes = FindTextAreas(bigRectified)
									
			# start OCR of text regions
			ocrResults.clear()
			
			bigGray = util.GetGrayscale(bigRectified)
			
			# do the OCR for each box
			for i in range(0, len(bigBoxes)):
				ocr.CreateTempFile(bigGray,bigBoxes[i],i)

			pool = multiprocessing.Pool(4)
			for i in range(0, len(bigBoxes)):
				ocrResults[i] = None
				pool.apply_async(ocr.CallOCREngine, args = (i,'./ocrtemp/',ocrEngine), callback = ocrCallback)
			pool.close()
			pool.join()
			
			# now we are done with OCR
			# restore small image
			cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, camSmall[X])
			cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, camSmall[Y])
			
			# get a couple frames to warm up
			for i in range(0,3):
				smFrame = cv.QueryFrame(camera)
				cv.WaitKey(10)
				 
			# retrain bg to hide the document
			for i in range(0, trainImages):
				smFrame = cv.QueryFrame(camera)
				util.RotateImage(smFrame, imgCopy, rotate)
				bg2.FindBackground(imgCopy, imgFG, bgModel)
				cv.ShowImage(windowTitle, imgFG)
				cv.WaitKey(10)
			
		### STEP THREE: WAIT FOR OCR TO HAPPEN
		if doingOcr and not ocrDone:
			# did we recognize everything?
			done = None not in ocrResults.values()
			if done: 
				ocrDone = True
				### OCR IS DONE ###
				# remove empty boxes				
				
				print bigBoxes
				print ocrResults
				
				if autocorrect:
					print 'autocorrect'
					# correct words
					for key in ocrResults.keys():
						word = ocrResults[key]
						if len(word) > 3:
							if not dicto.WordInDictionary(word): 
								correct = dicto.BestMatch(word,2)
								if correct is None:
									print 'Removed %s' % word
									ocrResults[key] = ''
								else:
									print '%s=>%s' % (word,correct)
									ocrResults[key] = correct.word
							else:
								print 'Keeping %s' % word
					print 'autocorrect done'
				
				newBoxes = []
				newOcrResults = {}
				for i in range(0, len(bigBoxes)):
					if ocrResults.has_key(i) and (ocrResults[i] is not None and len(ocrResults[i]) > 0):
						print 'Adding %s' % ocrResults[i]
						newBoxes.append(bigBoxes[i])
						newOcrResults[len(newBoxes)-1] = ocrResults[i]
				
				bigBoxes = newBoxes
				ocrResults.clear()
				for key in newOcrResults.keys():
					ocrResults[key] = newOcrResults[key]				
				
				print bigBoxes
				print ocrResults	
				
				speech.Say('OCR complete. Found %d items' % len([v for v in ocrResults.values() if v is not None and v != '']))
				
			
				
				print 'ocrresults %d boxes %d' % (len(bigBoxes),len(ocrResults.keys()))
				
				smallBoxes = [(p[0]*1./bigScale,p[1]*1./bigScale,p[2]*1./bigScale,p[3]*1./bigScale) for p in bigBoxes]
				SaveState(smallBoxes, documentCorners, aspectRatio, ocrResults, docWidth, docHeight, transform, transformInv)
				print 'Saved state'
				
				# auto add overlay
				overlays, overlayIndex = CreateOverlays(smallBoxes, docWidth, docHeight, sides=overlayNumSides)
				
				# # we can save this state
				# state = {}
				# state['ocrResults'] = ocrResults
				# state['documentCorners'] = documentCorners
				# state['aspectRatio'] = aspectRatio
				# state['transform'] = transform
				# state['transformInv'] = transformInv
				# state['smallBoxes'] = smallBoxes
				# state['docWidth'] = docWidth
				# state['docHeight'] = docHeight
				# state['overlays'] = overlays
				# timestamp = int(time.time())
				# pickle.dump(state, open('logs/data-%d.pickle' % timestamp, 'wb'))
				
		### STEP FOUR: IF WE HAVE A DOCUMENT, TRACK TOUCH
		if ocrDone:
			finger = GetFingerPosition(imgFG,imgFinger)
			if finger is None: lastTouched = None
			else:
				fingerTrans = util.Transform(finger, transform)
				
				if not tracking:
					if colorMode:
						cv.CvtColor(imgCopy,imgHSV,cv.CV_BGR2HSV)
						color = cv.Get2D(imgHSV, max(finger[1]-20,0), finger[0])
						colorName = GetColorName(color)
						# if colorName != lastColor:
						speech.Say(colorName)
						# lastColor = colorName
					else:
						handledFinger = False
						# first, check to see if we are in an overlay
						for overlay in overlays:
							if not handledFinger and util.PointInsideRect(fingerTrans, overlay):
								# map the overlay back to its source text box
								boxIndex = overlayIndex[overlays.index(overlay)]
								if lastTouched == overlay:
									if not tableMode: 
										accumulator += 1
									if accumulator > 60: # start tracking
										speech.Say('Locating %s' % ocrResults[boxIndex])
										trackingTarget = smallBoxes[boxIndex]
										tracking = True
										accumulator = 0
								else:
									try:
										speech.Say('Shortcut to %s' % ocrResults[boxIndex])
										accumulator = 1
									except KeyError:
										print 'whoops'
								lastTouched = overlay
								handledFinger = True
						# if that doesn't work, see if we are in the paper
						if not handledFinger and util.PointInsideRect(fingerTrans, (0,0,docWidth,docHeight)) and len(smallBoxes) > 0:
							# if so, get the closest box
							# are we inside a box?
							boxesInside = [b for b in smallBoxes if util.PointInsideRect(fingerTrans,b)]
							closestBox = None
							if len(boxesInside) > 0:
								closestBox = boxesInside[0]
							else:
								closestBox = min(smallBoxes, key=lambda b: util.distance(fingerTrans, (b[0]+b[2]/2,b[1]+b[3]/2)))
							index = smallBoxes.index(closestBox)
							box = smallBoxes[index]
							if lastTouched == box:
								pass
							else: # we are in a box
								if not tableMode: 
									try:
										speech.Say(ocrResults[index])
									except KeyError:
										print 'whoops'
								else: # get the row and column
									row, col = GetTableHeaders(box,leftRow,topRow)
									speech.Say('%s, row %s, column %s' % (ocrResults[index],row,col))
							lastTouched = box
				else: # tracking
					# are we at our target?
					if util.PointInsideRect(fingerTrans, trackingTarget):
						index = smallBoxes.index(trackingTarget)
						name = ocrResults[index]
						util.beep()
						speech.Say('Located %s' % name)
						tracking = False
					else:
						box = trackingTarget
						trackPoint = [box[0]+box[2]/2,box[1]+box[3]/2]
						dx = trackPoint[0] - fingerTrans[0]
						dy = trackPoint[1] - fingerTrans[1]
		
						# get dir
						direction = ''
						if abs(dx) > abs(dy):
							if dx > 0: direction = 'right'
							else: direction = 'left'
						else:
							if dy > 0: direction = 'down'
							else: direction = 'up'
						
						if counter % 10 == 0:
							speech.Say(direction)
		
		# mini key handler
		if char == 'b':
			showBG = not showBG
			print 'Show bg? %s' % showBG
		elif char == 's':
			showSkin = not showSkin
			print 'Show skin? %s' % showSkin
		elif char == 'c':
			colorMode = not colorMode
			print 'Color mode? %s' % colorMode
		elif char == 'o':
			overlays, overlayIndex = CreateOverlays(smallBoxes, docWidth, docHeight, sides=overlayNumSides)
			print 'Created overlay'
		elif char == 'a': # list all recognized items
			items = [ocrResults[i] for i in range(0, len(smallBoxes)) if ocrResults.has_key(i) and ocrResults[i] is not None and ocrResults[i] != '']
			whatToSay = '%d items. ' % len(items)
			for i in items:
				whatToSay += 'Item %s. ' %i
			speech.Say(whatToSay)
		elif char == 'u':
			print 'Updating bg'
			for i in range(0, trainImages):
				smFrame = cv.QueryFrame(camera)
				util.RotateImage(smFrame, imgCopy, rotate)
				bg2.FindBackground(imgCopy, imgFG, bgModel)
				cv.ShowImage(windowTitle, imgFG)
				cv.WaitKey(10)
		elif char == ' ':
			speech.StopSpeaking()
			tracking = False
				
		### STEP FOUR AND A HALF: CHECK FOR VOICE COMMANDS
		
		if ocrDone and not tracking and char == 'v':
			# available voice commands: color mode, text mode, find, rerecognize, cancel
			util.beep()
			print 'Waiting for voice command'
			commands = ['color','text','find','recognize','cancel','list','help','overlay','table']
			input = speech.listen(commands, 5)
			if input is None: 
				util.beep()
				speech.Say('Canceled')
			elif input == 'help':
				speech.Say('You can say ' + ', '.join(commands))
			elif input == 'color':
				speech.Say('Color mode selected')
				colorMode = True
			elif input == 'text':
				speech.Say('Text mode selected')
				colorMode = False
			elif input == 'recognize':
				if lastTouched is None: speech.Say('No text selected')
				else:
					speech.Say('rerecognizing')
					# multiprocessing.Process takes no callback argument; reuse the
					# pool + callback pattern from the initial OCR pass
					index = smallBoxes.index(lastTouched)
					pool = multiprocessing.Pool(1)
					pool.apply_async(ocr.CallOCREngine, args = (index, './ocrtemp/', 'abbyy'), callback = ocrCallback)
					pool.close()
					pool.join()
			elif input == 'cancel':
				speech.Say('Canceled')
			elif input == 'find':
				util.beep()
				speech.Say('Say the first word of the section you wish to find')
				# get first words
				firstWords = []
				phrases = []
				for i in range(0, len(ocrResults)):
					if ocrResults.has_key(i):
						phrase = ocrResults[i]
						phrases.append(phrase)
						if phrase is None:
							firstWords.append(None)
						else:
							firstWords.append(phrase.split(' ')[0].lower())

				wordFind = speech.listen(firstWords, 10)
				if wordFind is None: 
					util.beep()
					speech.Say('Not found')
				else:
					match = firstWords.index(wordFind.lower())
					speech.Say('Tracking %s' % ocrResults[match])
					tracking = True
					trackingTarget = smallBoxes[match]
			elif input == 'overlay':
				overlays, overlayIndex = CreateOverlays(smallBoxes, docWidth, docHeight, sides=overlayNumSides)
				speech.Say('Added overlays')
			elif input == 'table':
				speech.Say('Set table mode')
				overlays, overlayIndex = CreateTableOverlays(smallBoxes, docWidth, docHeight)
			elif input == 'list':
				# get all results
				items = [ocrResults[i] for i in range(0, len(smallBoxes)) if ocrResults.has_key(i) and ocrResults[i] is not None and ocrResults[i] != '']
				whatToSay = '%d items. ' % len(items)
				for i in items:
					whatToSay += 'Item %s. ' %i
				speech.Say(whatToSay)
		
		### STEP FIVE: DRAW EVERYTHING
		# doc corners
		util.DrawPoints(imgCopy, documentCorners, color=(255,0,0))
		
		# boxes and overlays
		for i in range(0,len(smallBoxes)):
			b = smallBoxes[i]
			if ocrResults.has_key(i) and (ocrResults[i] is not None and ocrResults[i] != ''):
				util.DrawRect(imgCopy, b, color=(0,0,255), transform=transformInv)
				pbox = util.Transform((b[X],b[Y]), transformInv)
				util.DrawText(imgCopy, ocrResults[i], pbox[X], pbox[Y], color=(0,0,255))				

		for i in range(0, len(overlays)):
			o = overlays[i]
			# get relevant box index
			box = overlayIndex[i]
			util.DrawRect(imgCopy, o, color=(0,100,255), transform=transformInv)
			if ocrResults.has_key(box) and (ocrResults[box] is not None and ocrResults[box] != ''):
				po = util.Transform((o[X],o[Y]), transformInv)
				util.DrawText(imgCopy, ocrResults[box], po[X], po[Y], color=(0,100,255))
				
		if finger is not None:
			util.DrawPoint(imgCopy, finger, color=(0,0,255))
		
		imgToShow = imgCopy
		if showSkin: imgToShow = imgSkin
		elif showBG: imgToShow = imgFG
		
		cv.ShowImage(windowTitle, imgToShow)
		
		### STEP END: INCREMENT THE COUNTER
		counter += 1
		### END OF MAIN LOOP

	logger.debug('Ended')
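
main() posts OCR jobs with callback=ocrCallback, but the callback itself is outside this excerpt. A plausible minimal version, assuming ocr.CallOCREngine returns an (index, text) pair (an assumption, not confirmed here):

ocrResults = {}  # module-level dict that main() declares global

def ocrCallback(result):
	# runs in the parent process whenever a pool worker finishes;
	# the result layout is assumed, see the note above
	index, text = result
	ocrResults[index] = text if text is not None else ''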
Example #4
def run():
	lastRunMaxTime=0 # cache entries newer than this get overwritten
	last_cache_time_file = LastCachedRumTimeFile
	if os.path.exists(last_cache_time_file):
		with open(last_cache_time_file,"r") as f: 
			try:
				lastRunMaxTime = int(f.readline())
			except ValueError: pass # stale or empty cache file
	if SetCacheLastTime is not None: lastRunMaxTime = SetCacheLastTime

	print(My_Ip, SUMO_PORT)
	traci.init(int(SUMO_PORT))
	var2str, varlist = get_sumo_vars(traci)
	badTls, badCross= [],[]
	Car.traci = traci

	tlslist=traci.trafficlights.getIDList()
	print('# tls', len(tlslist))
	tlsSet = set(tlslist)
	id2tls=dict()
	e2tls =dict() # controlled edge to tls.
	Tls.ofid = open(OutputDir+"tlslog.txt","w") if _Do_Output else None
	Tls.ofidname =  OutputDir+"tlslog.txt"
	Tls.traci = traci
	Tls.feedbackData = feedbackData
	unlearnedTls =[]
	for tlsid in tlsSet:
		tmp = Tls(tlsid, loadStages = True, stageFolder= TlsLoadStageDir) 
		id2tls[tlsid]=tmp
		ctrlanes = tmp.ctrlanes
		tmp.output_file = _Do_Output # writePhaseLog?
		for lane in ctrlanes:
			e2tls[lane.split("_",1)[0]]=tmp
		if len(tmp.stageList)==0: # not learned
			unlearnedTls.append(tmp)

	alledges = [eid for eid in traci.edge.getIDList() if not eid.startswith(":")] 
	edgeSet = set(alledges)
	print('# edges', len(edgeSet), "tls-ctr edges:",len(e2tls))
	if _Do_Output:
		with open(OutputDir+"netinfo.txt","w") as f:
			f.write("nedge=%d nctredge=%d ntls=%d"%(len(edgeSet),len(e2tls),len(id2tls)))
	lane2vq={} # vehicle queue at cross.

	id2car = dict()
	id2group = dict()
	Car.ofid = open(OutputDir+"carlog.txt","w") if _Do_Output else None
	Car.ofidname = OutputDir+"carlog.txt"
	Car.mvfd = open(OutputDir+"carmov.txt","w") if _Do_Output else None
	Car.mvfdname = OutputDir+"carmov.txt"
	Car.spdfd = open(OutputDir+"carspds.txt","w") if _Do_Output else None
	Car.spdfdname = OutputDir+"carspds.txt"
	Car.lane2vq = lane2vq
	Car.id2car = id2car
	Car.id2group = id2group
	Car.feedbackData = feedbackData
	Car.congestedAreas = congestedAreas
	Car.RemoveWongLaneThresh = 2

	print("!!! [cache] lastRunMaxTime",lastRunMaxTime)
	curtime=0
	sumoRunTime=0.
	manageRunTime=0.
	processRunTime=0.
	is_showing = None
	if Has_GUI_Device: 
		Car.traci.gui.setZoom('View #0',1000)
		vprint = set([]) #  to track this vehicle
	else: vprint=set([]) 

	while curtime<config.simulationEndTime+1:
		startTime = time.time()
		traci.simulationStep() 
		sumoRunTime += time.time()-startTime

		curtime = traci.simulation.getTime() -1 
		if Has_GUI_Device and curtime == -1:
			beep(5)
			raw_input("\nEnter any to Continue...") # debug purpose

		if curtime%30 ==0 or curtime==config.simulationEndTime:  
			print("\nT=%d  Vnum=%d sT=%.2f mT=%.2f pT=%.2f"%(curtime,len(id2car),1000*sumoRunTime/(curtime+1),1000*manageRunTime/(curtime+1),1000*processRunTime/(curtime+1)))
			if curtime>lastRunMaxTime: 
				lastRunMaxTime = curtime
				with open(last_cache_time_file,"w") as f: 
					f.write("%d"%lastRunMaxTime)

		if curtime%31==0 or is_showing is not None: 
			badTls= get_bad_list(feedbackData, "tls",TlsBadRateThresh, id2tls=id2tls)
			badCross= get_bad_list(feedbackData, "cross", CrossBadRateThresh)
			for tid in badTls: 
				print("Checkout bad tls "+tid)
			for nid in badCross: 
				print("bad cross "+nid)
				congestedAreas["cross"].add(nid)


		for vid in traci.simulation.getStartingTeleportIDList(): # stuck vehicles
			try:
				id2car[vid].remove(curtime,output=False) # traci remove + setDead
			except Exception: pass
			try:
				del id2car[vid]
			except KeyError: pass

		startTime = time.time()
		for vid in traci.simulation.getArrivedIDList():
			if vid in id2car:
				id2car[vid].setDead(curtime) # already auto removed.
				del id2car[vid]

		for vid in traci.simulation.getDepartedIDList(): # new born cars 
			tmp = Car(vid, modelPredictFuel=True)
			tmp.subscribe(varlist)
			tmp.setStartTime(curtime)
			rid = tmp.getRouteID()
			tmp.route = construct_segment(tmp.rou, e2tls, route_id=rid, print_str=vid in vprint, overwrite_cache=curtime>=lastRunMaxTime, prefix=config.osmfile ) 
			tmp.route.id = rid
			tmp.route.car = tmp
			tmp.e2tls= e2tls
			if vid.startswith('a'):
				tmp.do_Tls1 = R['tls1']>0 # query 1st tls ahead
				tmp.do_Tls2 = R['tls2']>0 # query two tls ahead
				tmp.do_coor = R['co']>0 # codrive
				tmp.do_optimizeSpeed = True
			else:
				tmp.do_Tls1 = False
				tmp.do_Tls2 = False
				tmp.do_coor = False
				tmp.do_optimizeSpeed = False # false will not do lane change.
			tmp.output_file = _Do_Output 
			id2car[vid]=tmp
			if vid in vprint: 
				tmp.iprintSet |= set([2,3,9]) # track this car, and print more
				tmp.route.print_str = True
				tmp.setColor((255,255,0))
				print(tmp.rou)
				pprint.pprint(tmp.route)
		manageRunTime += time.time()-startTime

		startTime = time.time()
		for tlsid in tlsSet: # proc tls, called before car.process
			id2tls[tlsid].process(curtime)

		for vid,car in id2car.items(): 
			car.process(curtime) # fill in basic variables

		for lid,que in lane2vq.items():
			que.sort(key=lambda car: car.lanePos) # sort by lane-pos.

		for vid,car in id2car.items(): 
			car.formGroup()

		for vid,car in id2car.items(): 
			car.optimize() # run optimization.

		for vid,car in id2car.items(): 
			car.transaction() # if codrive

		processRunTime += time.time()-startTime

	

	traci.close()
	# log files are only opened when _Do_Output is set, so guard the closes
	for fd in (Car.ofid, Tls.ofid, Car.mvfd, Car.spdfd):
		if fd is not None:
			fd.close()
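
A sketch of how run() might be driven against a local SUMO instance; the config path and port here are placeholders, since the original startup code is outside this excerpt:

import subprocess
import traci  # SUMO's TraCI Python client

SUMO_PORT = 8813  # must match the port run() hands to traci.init()
sumo = subprocess.Popen(['sumo', '-c', 'scenario.sumocfg',
                         '--remote-port', str(SUMO_PORT)])
run()        # connects via traci.init(SUMO_PORT) and steps the simulation
sumo.wait()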