Example #1
import os
from PIL import Image

import ocr_machine_learning
import digitASCII


def openImageOCR(digitsData):
	"""Load a digit image from ./images, show it in grayscale and recognize it."""
	try:
		imageFileName = raw_input('--> Please enter name of the digit image file to be recognized (0 to go back): ')
		if imageFileName == '0':
			return
		print 'Opening ' + imageFileName + '...'
		imageFileName = os.path.join(os.getcwd(), 'images', imageFileName)
		originalImage = Image.open(imageFileName)
		print '    Image Format: {0}'.format(originalImage.format)
		print '    Image Mode: {0}'.format(originalImage.mode)
		print '    Image Size: {0}'.format(originalImage.size)
		print 'Converting the original file to grayscale and displaying...'
		# Convert to 8-bit grayscale ('L' mode) so each pixel is a single intensity value.
		originalImage = originalImage.convert('L')
		originalImage.show()
		print '***************************************************'
		print 'Recognizing the digit...'

		# Turn the grayscale image into a feature vector and classify it.
		featureVector = ocr_machine_learning.getVector(originalImage, digitsData)
		print 'The feature vector for the image is: {0}'.format(featureVector)

		finalDigit = ocr_machine_learning.recognizeDigit(featureVector)
		print 'The digit in the image is:'
		print digitASCII.digits[finalDigit]

		# Ask the user (on the command line) whether the recognized digit is correct.
		checkCorrectDigitCommandLine(finalDigit, originalImage)

	except Exception as e:
		print 'Error while opening or recognizing the image: {0}'.format(e)
Example #2
	def recognize(self):
		# Assumes module-level imports: from PIL import ImageOps, plus the
		# ocr_machine_learning and digitASCII modules used below.
		# Only attempt recognition if something has actually been drawn on the canvas.
		if len(self.canvas.find_all()) != 0:
			self.checkingDigit = True

			# Invert the drawn image (swap black and white) before feature extraction.
			self.originalImage = ImageOps.invert(self.image)
			print '***************************************************'
			print 'Recognizing the digit...'

			# Turn the inverted image into a feature vector and classify it.
			featureVector = ocr_machine_learning.getVector(self.originalImage, self.digitsData)
			print 'The feature vector for the image is: {0}'.format(featureVector)

			finalDigit = ocr_machine_learning.recognizeDigit(featureVector)
			print 'The digit in the image is:'
			print digitASCII.digits[finalDigit]

			# Ask the user, via the GUI, whether the recognized digit is correct.
			self.checkCorrectDigitGUI(finalDigit)

		return
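
Both examples delegate the actual classification to an ocr_machine_learning module whose source is not shown here. The sketch below is a minimal, assumed version of that interface: the function names and call signatures match the examples above, but the bodies (a fixed 16x16 downsampling in getVector, a plain 1-nearest-neighbour lookup in recognizeDigit) and the assumed shape of digitsData as a list of (featureVector, label) pairs are illustrative only.

# A minimal, assumed sketch of the ocr_machine_learning interface used above.
# Function names and signatures follow the calls in the examples; the bodies,
# and the assumption that digitsData is a list of (featureVector, label) pairs,
# are illustrative only.

_trainingData = []


def getVector(image, digitsData):
	# Cache the labelled training samples so recognizeDigit() can reuse them.
	global _trainingData
	_trainingData = digitsData
	# Normalise every input image to 16x16 and flatten it to 256 grayscale values.
	small = image.resize((16, 16))
	return list(small.getdata())


def recognizeDigit(featureVector):
	# Plain 1-nearest-neighbour lookup with squared Euclidean distance.
	bestLabel, bestDistance = None, None
	for sampleVector, label in _trainingData:
		distance = sum((a - b) ** 2 for a, b in zip(featureVector, sampleVector))
		if bestDistance is None or distance < bestDistance:
			bestLabel, bestDistance = label, distance
	return bestLabel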