Code Example #1
File: views.py  Project: TomatoCream/nuspyp_django
def index(request):
    from tesserwrap import Tesseract
    from PIL import Image
    img = Image.open("/home/df/projects/django/nuspyp/tesseracttest/test.png")
    tr = Tesseract()
    tr.ocr_image(img)

    # `dog` is not defined in this snippet; it presumably aliases a Wand-style
    # image/PDF loader in the original project.
    img2 = dog(
        filename='/home/df/projects/django/nuspyp/tesseracttest/source.pdf')
    single_image = img2.sequence[0]
    tr.ocr_image(single_image)

    return HttpResponse(tr.get_text())
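
The `dog(...)` call above is not defined in the snippet and presumably aliases a Wand image constructor. A sketch of the same PDF-page OCR step written out explicitly, assuming the wand package and the same file path (not the original project's code):

from tempfile import NamedTemporaryFile

from PIL import Image
from tesserwrap import Tesseract
from wand.image import Image as WandImage

pdf = WandImage(
    filename='/home/df/projects/django/nuspyp/tesseracttest/source.pdf')
first_page = WandImage(image=pdf.sequence[0])  # clone the first page only

# tesserwrap works on PIL images, so write the page out and reopen it with PIL
# (the same round trip used in Code Example #9 below).
with NamedTemporaryFile(suffix='.png') as temp:
    first_page.save(filename=temp.name)
    tr = Tesseract()
    tr.ocr_image(Image.open(temp.name))
    print(tr.get_text())
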
Code Example #2
File: tesseract.py  Project: CodeForAfrica/aleph
def extract_image_data(data, languages=None):
    """Extract text from a binary string of data."""
    tessdata_prefix = get_config('TESSDATA_PREFIX')
    if tessdata_prefix is None:
        raise IngestorException("TESSDATA_PREFIX is not set, OCR won't work.")
    languages = get_languages_iso3(languages)
    text = Cache.get_ocr(data, languages)
    if text is not None:
        return text
    try:
        img = Image.open(StringIO(data))
    except DecompressionBombWarning as dce:
        log.debug("Image too large: %", dce)
        return None
    except IOError as ioe:
        log.info("Unknown image format: %r", ioe)
        return None
    # TODO: play with contrast and sharpening the images.
    extractor = Tesseract(tessdata_prefix, lang=languages)
    extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
    text = extractor.ocr_image(img)
    extractor.clear()
    log.debug('OCR done: %s, %s characters extracted',
              languages, len(text))
    Cache.set_ocr(data, languages, text)
    return text
Code Example #3
File: tesseract.py  Project: pudo/extractors
def extract_image_data(data, languages=None):
    """Extract text from a binary string of data."""
    if TESSDATA_PREFIX is None:
        raise ValueError("Env TESSDATA_PREFIX is not set, OCR will not work.")
    key, text = get_cache(data)
    if text is not None:
        return text
    try:
        img = Image.open(StringIO(data))
    except Exception as ex:
        log.debug("Failed to parse image internally: %r", ex)
        return ""

    # TODO: play with contrast and sharpening the images.
    try:
        languages = _get_languages(languages)
        extractor = Tesseract(TESSDATA_PREFIX, lang=languages)
        extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
        text = extractor.ocr_image(img)
        log.debug("OCR done: %s, %s characters extracted", languages, len(text))
        set_cache(key, text)
        return text
    except Exception as ex:
        log.exception(ex)
        return ""
Code Example #4
def extract_image_data(data, languages=None):
    """Extract text from a binary string of data."""
    if TESSDATA_PREFIX is None:
        raise ValueError('Env TESSDATA_PREFIX is not set, OCR will not work.')
    key, text = get_cache(data)
    if text is not None:
        return text
    try:
        img = Image.open(StringIO(data))
    except Exception as ex:
        log.debug('Failed to parse image internally: %r', ex)
        return ''

    # TODO: play with contrast and sharpening the images.
    try:
        languages = _get_languages(languages)
        extractor = Tesseract(TESSDATA_PREFIX, lang=languages)
        extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
        text = extractor.ocr_image(img)
        log.debug('OCR done: %s, %s characters extracted', languages,
                  len(text))
        set_cache(key, text)
        return text
    except Exception as ex:
        log.exception(ex)
        return ''
Code Example #5
File: backend.py  Project: jpbreuer/junction2016
def parse_img():
    # Pre-process the captured image before OCR: denoise, boost contrast,
    # then binarise.
    im = Image.open("./temp.jpg")  # the second one
    im = im.filter(ImageFilter.MedianFilter())
    enhancer = ImageEnhance.Contrast(im)
    im = enhancer.enhance(2)
    im = im.convert('1')
    im.save('./temp2.jpg')
    tr = Tesseract(os.environ["TESSDATA_PREFIX"], "eng")
    text = tr.ocr_image(Image.open('./temp2.jpg'))
    # The OCR result is not used here; the view just triggers a notification.
    return redirect('http://mailsnail.tech/api/notify')
Code Example #6
def tesseract():
    global semaphore

    while True:
        if not os.path.exists("./output.png"):
            break

        semaphore.acquire()
        img = Image.open("output.png")
        tr = Tesseract("/usr/local/share")
        text = tr.ocr_image(img)
        print(text)
        '''
        subprocess.call(["tesseract", "output.png","out"])
        '''
        semaphore.release()
Code Example #7
File: tesseract.py  Project: tomjie/aleph
def extract_image_data(data, languages=None):
    """Extract text from a binary string of data."""
    tessdata_prefix = get_config('TESSDATA_PREFIX')
    if tessdata_prefix is None:
        raise IngestorException("TESSDATA_PREFIX is not set, OCR won't work.")
    languages = get_languages_iso3(languages)
    text = Cache.get_ocr(data, languages)
    if text is not None:
        return text
    img = Image.open(StringIO(data))
    # TODO: play with contrast and sharpening the images.
    extractor = Tesseract(tessdata_prefix, lang=languages)
    extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
    text = extractor.ocr_image(img)
    log.debug('OCR done: %s, %s characters extracted', languages, len(text))
    Cache.set_ocr(data, languages, text)
    return text
Code Example #8
File: tesseract.py  Project: nivertech/aleph
def extract_image_data(data, languages=None):
    """Extract text from a binary string of data."""
    tessdata_prefix = get_config('TESSDATA_PREFIX')
    if tessdata_prefix is None:
        raise IngestorException("TESSDATA_PREFIX is not set, OCR won't work.")
    languages = get_languages_iso3(languages)
    text = Cache.get_ocr(data, languages)
    if text is not None:
        return text
    img = Image.open(StringIO(data))
    # TODO: play with contrast and sharpening the images.
    extractor = Tesseract(tessdata_prefix, lang=languages)
    extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
    text = extractor.ocr_image(img)
    log.debug('OCR done: %s, %s characters extracted',
              languages, len(text))
    Cache.set_ocr(data, languages, text)
    return text
Code Example #9
File: utils.py  Project: joashxu/django-tesseract
    def pages(self):
        for page in range(self.file.numPages):
            img = WandImage(filename=self.path + ('[%s]' % page),
                resolution=self.config['wand_resolution'])
            img.compression_quality = self.config['wand_compression_quality']
            temp = NamedTemporaryFile(suffix='.jpg')
            # Passing temp as file kwargs does not work for some reason.
            # So we just pass the filename.
            img.save(filename=temp.name)

            # Reopen the image file as PIL object
            img = Image.open(temp.name)

            # Run tesseract
            tr = Tesseract()
            result = tr.ocr_image(img)

            temp.close()

            yield result
Code Example #10
def ocr(info):
    cv2.imwrite('../fig/info.jpg', info)
    img = Image.open('../fig/info.jpg')
    tr = Tesseract(datadir='../data', lang='eng')
    text = tr.ocr_image(img)
    print(text)
Code Example #11
    def handleFrameForTaskB(self, frame, regionCoordinates):

        try:
            coordinates = list()
            for point in regionCoordinates:
                coordinates.append(
                    [point[0] * frame.shape[1], point[1] * frame.shape[0]])
            coordinates = np.int0(coordinates)
            frame = cv2.drawContours(frame, [coordinates], 0, (0, 255, 0), 2)
            warped = four_point_transform(frame, coordinates)
            shrunk = cv2.cvtColor(warped[:, int(warped.shape[1] / 10):],
                                  cv2.COLOR_BGR2GRAY)
            scale = 6
            shrunk = cv2.resize(
                shrunk, (shrunk.shape[1] * scale, shrunk.shape[0] * scale),
                interpolation=cv2.INTER_CUBIC)
            _, shrunk = cv2.threshold(shrunk, 100, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            shrunk = 255 - cv2.dilate(
                255 - shrunk, np.ones((1, 1)), iterations=1)

            num, features = cv2.connectedComponents(255 - shrunk)

            plate = str()
            corners = list()
            for i in range(0, num):
                area = np.sum((features == i))
                if area > scale**2 * 2 * 25 and area < scale * 4 * 500:
                    rows = np.any(features == i, axis=1)
                    cols = np.any(features == i, axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    corners.append([rmin, cmin, rmax, cmax])
            corners = np.array(corners)

            idx = np.argsort(corners[:, 1])
            sorted_corners = corners[idx]

            for corner in sorted_corners:
                minx = corner[0] - 2
                miny = corner[1] - 2
                maxx = corner[2] + 2
                maxy = corner[3] + 2

                if minx < 0:
                    minx = 0
                if miny < 0:
                    miny = 0

                snip = features[minx:maxx, miny:maxy]

                if snip.shape[1] > snip.shape[0]:
                    continue

                snip = cv2.erode(snip.astype(np.uint8),
                                 np.ones((5, 5)),
                                 iterations=1)
                im = Image.fromarray(np.uint8(snip))
                tr = Tesseract(datadir="/usr/share/tessdata")

                letter = tr.ocr_image(im).rstrip()
                for l in letter:
                    if l.isalnum():
                        letter = l
                plate += letter.capitalize()

            alphs = "".join(itertools.takewhile(str.isalpha, plate))
            nums = plate[len(alphs):]

            if len(alphs) == 2:
                plate = alphs[0] + "-" + alphs[1] + "-" + nums
            elif len(alphs) == 5:
                plate = alphs[:3] + "-" + alphs[3:] + "-" + nums
            else:
                diffs = list()
                alphscorners = sorted_corners[:len(alphs)]
                for i in range(len(alphscorners)):
                    if sorted_corners[i][1] == alphscorners[-1][1]:
                        break
                    diffs.append(sorted_corners[i + 1][1] -
                                 sorted_corners[i][3])

                cuts = np.array(diffs) > np.mean(diffs)
                rev_cuts = cuts[::-1]
                for i in range(len(cuts[::-1])):
                    if (rev_cuts[i] == 1):
                        alphs = alphs[:len(cuts) - i] + "-" + alphs[len(cuts) -
                                                                    i:]
                plate = alphs + "-" + nums
            if len(plate) < 5:
                return None
            elif len(plate) > 11:
                return None
            elif plate.count("-") > 2:
                return None
            elif plate.count("-") < 2:
                return None
            else:
                return plate
        except Exception:
            return None
Code Example #12
warp = warp[::-1]

#print(warp)
fig = plt.figure(1, (10, 10))
grid = ImageGrid(fig, 111,
                 nrows_ncols=(17, 10),
                 axes_pad=0.1,
                 aspect=True,
                 )

tsOutput = []
for i in range(0, 162):
    #print warp[i]
    image = warp[i]
    (h, w) = image.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, 270, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h))
    letter = cv2.flip(rotated, 1)
    grid[i].imshow(letter)
    im = Image.fromarray(letter)
    text = tr.ocr_image(im).replace("\n", "")
    if text == "":
        text = " "
    tsOutput.append(text)
    #print(tr.ocr_image(im), end="")

print("".join(tsOutput))
plt.show()
cv2.destroyAllWindows()
Code Example #13
def ocr(info):
    cv2.imwrite('../fig/info.jpg', info)
    img = Image.open('../fig/info.jpg')
    tr = Tesseract(datadir='../data', lang='eng')
    text = tr.ocr_image(img)
    print(text)
Code Example #14
File: vidRead.py  Project: stasialan/NAO-Reader
    #print height, " ", width
    #sleep(5)
    #gray = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
    #height, width = gray.shape[:2]
    # Operations on the frame: some manipulation to reduce effects from
    # surrounding objects, light & shadow play, etc.
    gray = cv2.medianBlur(gray, 5)
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 5, 25, 25)

    #cv2.rectangle(gray, (width/2-150, height/2-150), (width/2+200, height/2+150), (255,0,0), 2)

    cv2.imshow('frame', gray)  # show the current frame
    #gray = gray.copy()[height/2-150:height/2+150,width/2-150:width/2+200]
    image = Image.fromarray(gray)  # turn the array into a PIL image
    speech = ocr.ocr_image(image)  # OCR the image with Tesseract
    #print type(speech)
    #speech = ocr.get_utf8_text()
    if ocr.get_mean_confidence() >= 63 and len(speech.strip()) > 1:
        tts.say(str(speech))
        print(speech)
    sleep(1)
    #robot speech

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
#cap.release()
camProxy.unsubscribe(videoClient)
cv2.destroyAllWindows()
Code Example #15
#print(warp)
fig = plt.figure(1, (10, 10))
grid = ImageGrid(
    fig,
    111,
    nrows_ncols=(17, 10),
    axes_pad=0.1,
    aspect=True,
)

tsOutput = []
for i in range(0, 162):
    #print warp[i]
    image = warp[i]
    (h, w) = image.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, 270, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h))
    letter = cv2.flip(rotated, 1)
    grid[i].imshow(letter)
    im = Image.fromarray(letter)
    text = tr.ocr_image(im).replace("\n", "")
    if text == "":
        text = " "
    tsOutput.append(text)
    #print tr.ocr_image(im),
    #print(tr.ocr_image(im),end = "")

print "".join(tsOutput)
plt.show()
cv2.destroyAllWindows()
Code Example #16
from tesserwrap import Tesseract
from PIL import Image
tr = Tesseract("/usr/local/share") # this is slow
im = Image.open("test2.png")
text = tr.ocr_image(im)
print text
words = text.split()
for thing in words:
	if thing == "Arlington":
		print "found ittt"