Example #1
def detect():
    data = request.get_json()
    img_path = data['path']
    try:
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        print("img", img_path, img.shape)

        target_boxes = detector.detect_bbox(copy.deepcopy(img))
        all_points = npPointsCraft.detect(img, target_boxes)
        all_points = [ps for ps in all_points if len(ps)]

        # cut zones
        rgb_zones = [
            getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points
        ]
        zones = convertCvZonesRGBtoBGR(rgb_zones)

        # find standard
        region_ids, count_lines = optionsDetector.predict(zones)
        region_names = optionsDetector.getRegionLabels(region_ids)

        # find text with postprocessing by standard
        text_arr = textDetector.predict(zones, region_names, count_lines)
        return ujson.dumps(dict(res=text_arr, img_path=img_path))
    except Exception as e:
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb)
        return ujson.dumps(dict(error=str(e), img_path=img_path))
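This handler reads like a Flask view function (it relies on a module-level request and a JSON body of the form {"path": "..."}). A minimal sketch of how it might be registered and exercised, assuming Flask; the route, port, and client call are assumptions, not part of the original project:

from flask import Flask, request

app = Flask(__name__)
# register detect() from the example above as a POST endpoint (hypothetical route name)
app.add_url_rule('/detect', 'detect', detect, methods=['POST'])

if __name__ == '__main__':
    # exercise with e.g.:
    #   requests.post('http://localhost:5000/detect', json={'path': 'images/example2.jpeg'})
    app.run(port=5000)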
Example #2
def test(dir_name, fname):
    start_time = time.time()
    img_path = os.path.join(dir_name, fname)
    with open(img_path, 'rb') as in_file:
        img = jpeg.decode(in_file.read())  # jpeg: assumed to be a JPEG decoder object such as PyTurboJPEG
    image_load_time = time.time() - start_time

    start_time = time.time()
    targetBoxes = detector.detect_bbox(img)
    detect_bbox_time = time.time() - start_time

    start_time = time.time()
    all_points = npPointsCraft.detect(img, targetBoxes)
    all_points = [ps for ps in all_points if len(ps)]
    craft_time = time.time() - start_time

    start_time = time.time()

    zones = convertCvZonesRGBtoBGR(
        [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points])

    perspective_align_time = time.time() - start_time

    start_time = time.time()
    regionIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)
    classification_time = time.time() - start_time

    start_time = time.time()
    _ = textDetector.predict(zones, regionNames, countLines)
    ocr_time = time.time() - start_time
    return (image_load_time, detect_bbox_time, craft_time,
            perspective_align_time, classification_time, ocr_time)
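A hedged sketch of how the per-stage timings returned by test() might be aggregated over a folder of JPEGs; the directory name and file filter are assumptions:

import os
import numpy as np

stage_names = ['load', 'bbox', 'craft', 'align', 'classify', 'ocr']
files = [f for f in os.listdir('../images') if f.endswith('.jpeg')]
timings = np.array([test('../images', f) for f in files])  # one row of six stage timings per image
for name, mean_s in zip(stage_names, timings.mean(axis=0)):
    print(f'{name}: {mean_s:.3f}s')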
Example #3
async def test(dirName, fname, y, min_bbox_acc=0.5, verbose=0):
    nGood = 0
    nBad = 0
    img_path = os.path.join(dirName, fname)
    if verbose == 1:
        print(colored(f"__________ \t\t {img_path} \t\t __________", "blue"))
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    targetBoxes = detector.detect_bbox(img)

    all_points = npPointsCraft.detect(img, targetBoxes, [5, 2, 0])
    # drop empty point sets (workaround for the images/14.jpeg bug)
    all_points = [ps for ps in all_points if len(ps)]

    print('all_points')
    print(all_points)

    # cut zones
    toShowZones = [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points]
    zones = convertCvZonesRGBtoBGR(toShowZones)
    for zone, points in zip(toShowZones, all_points):
        plt.axis("off")
        plt.imshow(zone)
        plt.show()

    # find standard
    regionIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)
    print(regionNames)
    print(countLines)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones, regionNames, countLines)
    print(textArr)

    # draw the bounding rect and the 4 corner points
    for targetBox, points in zip(targetBoxes, all_points):
        cv2.rectangle(img,
                      (int(targetBox[0]), int(targetBox[1])),
                      (int(targetBox[2]), int(targetBox[3])),
                      (0, 120, 255),
                      3)
        cv2.polylines(img, np.array([points], np.int32), True, (255, 120, 255), 3)
    plt.imshow(img)
    plt.show()

    for yText in y:
        if yText in textArr:
            print(colored(f"OK: TEXT:{yText} \t\t\t RESULTS:{textArr} \n\t\t\t\t\t in PATH:{img_path}", 'green'))
            nGood += 1
        else:
            print(colored(f"NOT OK: TEXT:{yText} \t\t\t RESULTS:{textArr} \n\t\t\t\t\t in PATH:{img_path} ", 'red'))
            nBad += 1
    return nGood, nBad
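Because this test() is declared async, it must be awaited or driven through an event loop. A minimal sketch; the file name and the expected plate text in y are placeholders, not real data:

import asyncio

nGood, nBad = asyncio.run(test('../images', 'example2.jpeg', ['AB1234CD'], verbose=1))
print(f'recognized: {nGood}, missed: {nBad}')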
Example #4
def read_number_plates(img):
    targetBoxes = detector.detect_bbox(img)
    all_points = npPointsCraft.detect(img, targetBoxes, [5, 2, 0])

    # cut zones
    zones = convertCvZonesRGBtoBGR(
        [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points])

    # predict zones attributes
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)

    return textArr, regionNames
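A short usage sketch for read_number_plates(), assuming the image is loaded with OpenCV and converted to RGB as in the other examples; the path is a placeholder:

import cv2

img = cv2.imread('images/example2.jpeg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
textArr, regionNames = read_number_plates(img)
print(textArr, regionNames)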
Example #5
rootDir = '../images/*'

imgs = [mpimg.imread(img_path) for img_path in glob.glob(rootDir)]

for img in imgs:
    targetBoxes = detector.detect_bbox(copy.deepcopy(img))

    all_points = npPointsCraft.detect(img, targetBoxes)
    all_points = [ps for ps in all_points if len(ps)]
    print(all_points)

    # cut zones
    toShowZones = [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points]
    zones = convertCvZonesRGBtoBGR(toShowZones)
    for zone, points in zip(toShowZones, all_points):
        plt.axis("off")
        plt.imshow(zone)
        plt.show()

    # find standard
    regionIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)
    print(regionNames)
    print(countLines)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones, regionNames, countLines)
    print(textArr)
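Note that matplotlib's mpimg.imread already returns images in RGB order, so no BGR-to-RGB conversion is needed here; PNG files, however, decode to float32 arrays in [0, 1], which the OpenCV-based zone cutting may not expect. A hedged normalization step, only needed if the glob matches PNGs:

import numpy as np

if img.dtype != np.uint8:
    img = (img * 255).astype(np.uint8)  # scale float [0, 1] pixels to uint8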
Example #6
import cv2

from NomeroffNet.OptionsDetector import OptionsDetector
from NomeroffNet.TextDetector import TextDetector
from NomeroffNet.TextPostprocessing import textPostprocessing

# load models
optionsDetector = OptionsDetector()
optionsDetector.load("latest")

textDetector = TextDetector.get_static_module("eu")()
textDetector.load("latest")

# Detect numberplate
img_path = 'images/example2.jpeg'
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# detector and npPointsCraft are assumed to be loaded elsewhere (see the setup sketch below)
targetBoxes = detector.detect_bbox(img)
all_points = npPointsCraft.detect(img, targetBoxes, [5, 2, 0])

# cut zones
zones = convertCvZonesRGBtoBGR(
    [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points])

# predict zones attributes
regionIds, countLines = optionsDetector.predict(zones)
regionNames = optionsDetector.getRegionLabels(regionIds)

# find text with postprocessing by standard
textArr = textDetector.predict(zones)
textArr = textPostprocessing(textArr, regionNames)
print(textArr)
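This snippet uses detector and npPointsCraft without defining them. Under the assumption that these examples target the NomeroffNet 2.x layout (the module paths below are that assumption, not confirmed by the snippet), the setup typically looks like:

from NomeroffNet.YoloV5Detector import Detector  # assumed module path
detector = Detector()
detector.load()

from NomeroffNet.BBoxNpPoints import (NpPointsCraft, getCvZoneRGB,
                                      convertCvZonesRGBtoBGR, reshapePoints)
npPointsCraft = NpPointsCraft()
npPointsCraft.load()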