Example #1
from tesserwrap import Tesseract
from PIL import Image

def ocrImage(tagDest, tessdataPrefix, lang, charWhitelist, pageMode):
  # genymotion_vm_name is a module-level global defined elsewhere in the source script
  destOcrImg = "/tmp/" + genymotion_vm_name + "-" + tagDest + ".png"
  print "OCR : " + str(destOcrImg)
  # Configure the OCR engine: tessdata path, language, character whitelist and page segmentation mode
  tr = Tesseract(tessdataPrefix, lang)
  tr.set_variable("tessedit_char_whitelist", charWhitelist)
  tr.set_page_seg_mode(pageMode)
  # Run OCR on the saved screenshot and return the recognized text
  image = Image.open(destOcrImg)
  tr.set_image(image)
  return tr.get_utf8_text()
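
A hypothetical call, for illustration only: the VM name, tag and tessdata path below are assumptions, and page mode 7 is Tesseract's "single text line" segmentation mode.

genymotion_vm_name = "nexus5"  # hypothetical; the real script defines this elsewhere
# assumes /tmp/nexus5-balance.png was saved by an earlier automation step
balance = ocrImage("balance", "/usr/share/tesseract-ocr/", "eng", "0123456789", 7)
print "OCR result: " + balance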
Example #2
from tesserwrap import Tesseract
from PIL import Image

def ocr_text(img):
    tr = Tesseract(lang='eng')
    tr.clear()
    # Convert the numpy array (e.g. an OpenCV frame) to a PIL image
    pil_image = Image.fromarray(img)
    # Turn off OCR word dictionaries
    tr.set_variable('load_system_dawg', "F")
    tr.set_variable('load_freq_dawg', "F")
    # '-psm' is a command-line flag, not an engine variable; use the API call instead
    tr.set_page_seg_mode(7)  # 7 = treat the image as a single text line
    tr.set_variable('tessedit_char_whitelist', "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
    tr.set_image(pil_image)
    utf8_text = tr.get_text()
    return unicode(utf8_text, 'utf-8')
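
For illustration, a hypothetical caller feeding the function an OpenCV frame; the file name is an assumption.

import cv2
frame = cv2.imread("plate.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
print ocr_text(frame)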
Example #3
# Much of this got commented out in the end; it stays here for educational reasons only.
# TODO: connect to the robot's video feed and pull frames (see the sketch at the end).
import numpy as np
import vision_definitions
from time import sleep
import cv2
#from pytesser import *
from tesserwrap import Tesseract
from PIL import Image
from naoqi import ALProxy

ocr = Tesseract()
ocr.set_variable("tessedit_char_whitelist", "ABCDEFGHIJKLMNOPQRSTUVWXYZ") # we use upper-case text only
#ocr.set_variable("classify_enable_learning", "0")
#ocr.set_variable("classify_enable_adaptive_matcher", "0")

#cap = cv2.VideoCapture(0)

#connecting to the robot
IP = "192.168.0.238"
#speech module
tts = ALProxy("ALTextToSpeech", IP, 9559)
cameraid = 0 # 0 = top camera
camProxy = ALProxy("ALVideoDevice", IP, 9559)
resolution = vision_definitions.kVGA
colorSpace = vision_definitions.kBGRColorSpace
videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
camProxy.setParam(vision_definitions.kCameraSelectID, cameraid)
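
The example sets up the proxies but stops before actually pulling frames. A minimal sketch of the missing loop, assuming the old NAOqi getImageRemote() return layout (width, height and the raw buffer at indices 0, 1 and 6) and Pillow's Image.frombytes (older PIL spells it Image.fromstring):

# Grab one VGA frame, OCR it, and speak the result.
naoImage = camProxy.getImageRemote(videoClient)
width, height, data = naoImage[0], naoImage[1], naoImage[6]
# The buffer is BGR; channel order stops mattering once we convert to grayscale for OCR.
frame = Image.frombytes("RGB", (width, height), data).convert("L")
ocr.set_image(frame)
text = ocr.get_utf8_text().strip()
if text:
    tts.say(text)
camProxy.unsubscribe(videoClient)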