def Display(fontC_, plate_pub_, model_path, rate_d):
    print('Start Display')

    # model load
    model_ = pr.LPR(model_path + "cascade.xml", model_path + "model12.h5",
                    model_path + "ocr_plate_all_gru.h5")
    while not rospy.is_shutdown():
        if not que.empty():
            frame = que.get()
            frame_intercept = imageIntercept(frame)
            for pstr, confidence, rect in model_.SimpleRecognizePlateByE2E(
                    frame_intercept):
                print(confidence)
                if confidence > 0.9:
                    #frame = drawRectBox(frame_intercept, rect, pstr + " " + str(round(confidence, 3)), fontC)
                    print("plate_str:")
                    print(pstr)
                    print("plate_confidence")
                    print(confidence)
                    print(len(pstr))
                    if len(pstr) >= 7:
                        frame = makeTextPic(frame, fontC_)
                        plateImage = getPlateImage(frame_intercept, rect)
                        cv2.imwrite("~/plate.jpg", plateImage)
                        pubPlatePicMsg(pstr, plateImage, frame, plate_pub_)
            """
            frame = cv2.resize(frame, None, fx = scaling_factor, fy=scaling_factor, interpolation = cv2.INTER_AREA)
            msg = bridge.cv2_to_imgmsg(frame, encoding = "bgr8")
            img_pub.publish(msg)
            """
            print('** publishing webcam_frame ***')
        else:
            print("que is empty")

        rate_d.sleep()
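The Display loop above consumes frames from a global que that is filled elsewhere. A minimal Python 3 sketch of the producer side, assuming a sensor_msgs/Image subscriber and a small bounded queue (the topic name and queue size are illustrative, reusing the webcam/image_raw topic mentioned further below):

import queue

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

que = queue.Queue(maxsize=2)  # a small queue keeps the display close to real time
bridge = CvBridge()

def image_callback(msg):
    # convert the ROS image to an OpenCV BGR frame
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    if que.full():
        que.get_nowait()  # drop the oldest frame instead of blocking
    que.put(frame)

rospy.Subscriber("webcam/image_raw", Image, image_callback, queue_size=1)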
Example No. 2
def ImageProcessing(image_path):
    model = pr.LPR("model/cascade.xml", "model/model12.h5",
                   "model/ocr_plate_all_gru.h5")
    grr = cv2.imread(image_path)
    print("[Server] 神经网络正在处理图片.......")

    # The recognition result is a list; each element is itself a list of
    # [plate string, confidence, plate position], e.g.
    # [['9F72E1', 0.6781167685985565, [145.62, 217.5, 104.53178023338317, 39.0]],
    #  ['苏E9F72E', 0.8783870339393616, [107.66, 215.05, 140.35348415374756, 42.9]]]
    proc_results = model.SimpleRecognizePlateByE2E(grr)
    # Results with the position information stripped out
    proc_results_without_pos = []
    print("[Server] 处理结果为:")
    for result in proc_results:
        print("plate_str:", result[0], "plate_confidence:", result[1])
        result.pop()  # drop the position info, which is the last element
        proc_results_without_pos.append(result)

    print("[Server] 正在计算处理时间......")
    t0 = time.time()
    for x in range(20):
        model.SimpleRecognizePlateByE2E(grr)
    t = (time.time() - t0) / 20.0
    print("Image size: " + str(grr.shape[1]) + "x" + str(grr.shape[0]) +
          " need " + str(round(t * 1000, 2)) + "ms")

    return proc_results_without_pos, t
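A short usage sketch for ImageProcessing (the image path is illustrative): each returned entry is [plate string, confidence], so the best candidate is simply the one with the highest confidence.

proc_results, avg_time = ImageProcessing("images_rec/demo.jpg")
if proc_results:
    best_plate, best_confidence = max(proc_results, key=lambda r: r[1])
    print("Best candidate:", best_plate, "confidence:", round(best_confidence, 3))
else:
    print("No plate recognized")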
Example No. 3
def SpeedTest(grr):
    model = pr.LPR("model/cascade.xml", "model/model12.h5",
                   "model/ocr_plate_all_gru.h5")
    model.SimpleRecognizePlateByE2E(grr)  # warm-up run
    t0 = time.time()
    for x in range(2):
        model.SimpleRecognizePlateByE2E(grr)
    t = (time.time() - t0) / 2.0
    print("Average recognition time: " + str(round(t * 1000, 2)) + "ms")
Example No. 4
def recognize_plate(image, smallest_confidence = 0.7):
    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")  # pretrained model files
    return_all_plate = []
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(image):
        if confidence > smallest_confidence:
            return_all_plate.append([pstr, confidence, rect])  # [plate string, confidence, plate position]
    return return_all_plate
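A minimal usage sketch for recognize_plate (the image path is illustrative), keeping only the single highest-confidence candidate from the returned [plate string, confidence, rect] entries:

image = cv2.imread("images_rec/demo.jpg")
candidates = recognize_plate(image, smallest_confidence=0.7)
if candidates:
    pstr, confidence, rect = max(candidates, key=lambda c: c[1])
    print("plate:", pstr, "confidence:", round(confidence, 3), "rect:", rect)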
Example No. 5
def SpeedTest(image_path):
    grr = cv2.imread(image_path)
    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
    model.SimpleRecognizePlateByE2E(grr)
    t0 = time.time()
    for x in range(20):
        model.SimpleRecognizePlateByE2E(grr)
    t = (time.time() - t0) / 20.0
    print("Image size: " + str(grr.shape[1]) + "x" + str(grr.shape[0]) + " need " + str(round(t * 1000, 2)) + "ms")
Example No. 6
def recognize_plate(image, smallest_confidence = 0.7):
    # grr = cv2.imread(image_path)

    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
    return_all_plate = []
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(image):
        if confidence > smallest_confidence:
            return_all_plate.append([pstr, confidence, rect])
    return return_all_plate
Example No. 7
def visual_draw_position(grr):
    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > 0.7:
            grr = drawRectBox(grr, rect, pstr + " " + str(round(confidence, 3)))
            print("Plate number:")
            print(pstr)
            print("Confidence:")
            print(confidence)
    cv2.imshow("image", grr)
    cv2.waitKey(0)
Example No. 8
def webcamImagePub():
    # init ros_node
    rospy.init_node('plate_detection', anonymous = True)
    model_ = rospy.get_param('model')
    font_ = rospy.get_param('font')
    video_ = rospy.get_param('~video')
    video_sub_ = 'H.265/ch1/main/av_stream'
    video_ = video_ + video_sub_

    fontC = ImageFont.truetype(font_, 14, 0)
    # queue_size should be small to keep the stream real-time;
    # otherwise the node keeps publishing stale frames
    #from sensor_msgs.msg import Image
    #img_pub = rospy.Publisher('webcam/image_raw', Image, queue_size = 2)
    rate = rospy.Rate(5) # 5hz
    # model load
    model = pr.LPR(model_ + "cascade.xml", model_ + "model12.h5", model_ + "ocr_plate_all_gru.h5")

    # make a video_object and init the video object
    cap = cv2.VideoCapture(video_)
    # downscaling factor applied to the published frame
    scaling_factor = 0.5
    # CvBridge is a class; cv2_to_imgmsg() must be called on a CvBridge instance
    bridge = CvBridge()

    if not cap.isOpened():
        sys.stdout.write("Webcam is not available !")
        return -1

    count = 0
    # loop until ROS shutdown
    while not rospy.is_shutdown():
        ret, frame = cap.read()
        if not ret:
            rospy.loginfo("Capturing image failed.")
            rate.sleep()
            continue
        count += 1
        # process every second frame
        if count >= 2:
            count = 0
            for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(frame):
                if confidence > 0.7:
                    frame = drawRectBox(frame, rect, pstr + " " + str(round(confidence, 3)), fontC)
                    print("plate_str:")
                    print(pstr)
                    print("plate_confidence")
                    print(confidence)

            frame = cv2.resize(frame, None, fx = scaling_factor, fy=scaling_factor, interpolation = cv2.INTER_AREA)
            #msg = bridge.cv2_to_imgmsg(frame, encoding = "bgr8")
            #img_pub.publish(msg)
            print('** publishing webcam_frame ***')
        rate.sleep()
def visual_draw_position(image_path):
    max_confidence = 0
    best_plate = ''
    grr = cv2.imread(image_path)
    model = pr.LPR("model/cascade.xml", "model/model12.h5",
                   "model/ocr_plate_all_gru.h5")
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > max_confidence:
            max_confidence = confidence
            best_plate = pstr
        grr = drawRectBox(grr, rect, pstr + " " + str(round(confidence, 3)))
    return grr, best_plate, max_confidence
def recognize_plate(image, smallest_confidence = 0.7):
    start1 = time.time()
    model = pr.LPR("../PlateRecognition/model/cascade.xml", "../PlateRecognition/model/model12.h5", \
                 "../PlateRecognition/model/ocr_plate_all_gru.h5")
    # print("Model load time: " + str(time.time() - start1))
    start2 = time.time()
    return_all_plate = []
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(image):
        if confidence > smallest_confidence:
            return_all_plate.append([pstr, confidence, rect])
    # print("Model inference time: " + str(time.time() - start2))
    return return_all_plate
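This recognize_plate variant rebuilds the LPR model from the three weight files on every call, which the commented timing line suggests is the expensive step. A minimal sketch of loading the model once at module level and reusing it, following the same global-model pattern as the recong() example further below (the helper names are illustrative):

_model = None

def get_model():
    global _model
    if _model is None:  # load the three weight files only on the first call
        _model = pr.LPR("../PlateRecognition/model/cascade.xml",
                        "../PlateRecognition/model/model12.h5",
                        "../PlateRecognition/model/ocr_plate_all_gru.h5")
    return _model

def recognize_plate_cached(image, smallest_confidence=0.7):
    return [[pstr, confidence, rect]
            for pstr, confidence, rect in get_model().SimpleRecognizePlateByE2E(image)
            if confidence > smallest_confidence]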
Example No. 11
def detect_ssd(image, pr):

    bboxes, small_images, scores = do.detect(image)

    plate_texts = []
    confidences = []
    for img in small_images:
        plate_text, confidence = pr.recognizeOne(img)
        plate_texts.append(plate_text)
        confidences.append(confidence)

    return bboxes, small_images, plate_texts, scores, confidences
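detect_ssd returns five parallel lists: the detector's boxes, crops and scores plus the OCR text and confidence per crop. A minimal usage sketch combining both scores (the thresholds and the way image is loaded are illustrative; do and pr are assumed to be initialized elsewhere in the project):

image = cv2.imread("images_rec/demo.jpg")
bboxes, small_images, plate_texts, scores, confidences = detect_ssd(image, pr)
for bbox, text, score, conf in zip(bboxes, plate_texts, scores, confidences):
    if score > 0.5 and conf > 0.7:  # keep detections both models agree on
        print("plate:", text, "det_score:", score, "ocr_confidence:", conf)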
Example No. 12
File: api.py  Project: pengdake/ccpd
def Recognize(imagefile, model_path, model_type):
    keras.backend.clear_session()
    graph = tf.get_default_graph()
    grr = cv2.imread(imagefile)
    cascade_path = model_path + "/cascade.xml"
    model12_path = model_path + "/model12.h5"
    if model_type == "gru":
        ocr_model_path = model_path + "/ocr_plate_all_gru.h5"
    #ocr_model_path = model_path + "/ocr_plate_all_wrnn.h5"
    #ocr_model_path = model_path + "/ocr_plate_all_w_rnn_2.h5"
    elif model_type == "wrnn":
        ocr_model_path = model_path + "/ocr_wrnn_ccpd_model.h5"

    model = pr.LPR(cascade_path, model12_path, ocr_model_path, model_type)
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr, graph):
        if confidence > 0.7:
            return pstr
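Recognize returns the first candidate above the 0.7 confidence threshold and implicitly returns None when nothing qualifies. A short usage sketch (paths are illustrative):

plate = Recognize("uploads/car.jpg", "model", "gru")
if plate is None:
    print("no confident plate found")
else:
    print("recognized plate:", plate)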
Example No. 13
def recong(path):
    global model1
    grr = cv2.imread(str(path))
    if model1 == 0:
        model1 = pr.LPR("model/cascade.xml", "model/model12.h5",
                        "model/ocr_plate_all_gru.h5")
    result = model1.SimpleRecognizePlateByE2E(grr)
    #print result
    if result:
        best_result = result[0][0]
        for pstr, confidence, rect in result:
            if confidence > 0.7:
                print("plate_str", pstr[-6:])
                print("plate_confidence", confidence)
                #cv2.waitKey(0)
        return best_result
    else:
        return "no plate"
def visual_draw_position(grr):
    model = pr.LPR("../PlateRecognition/model/cascade.xml", "../PlateRecognition/model/model12.h5", \
        "../PlateRecognition/model/ocr_plate_all_gru.h5")
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > 0.7:
            #grr = drawRectBox(grr, rect, pstr+" "+str(round(confidence,3)))
            print("Plate number:")
            print(pstr)
            print("Confidence:")
            print(confidence)
            print(grr.shape)
            #print(rect[1], rect[3])
            #print(rect[0], rect[2])
            cv2.imshow("plate", grr[int(rect[1]) : int(rect[3] + rect[1]), \
                            int(rect[0]) : int(rect[2] + rect[0])])
    #cv2.imshow("image",grr)
    #cv2.waitKey(0)
    return grr
Example No. 15
def drawRectBox(image, rect, addText):
    cv2.rectangle(image, (int(rect[0]), int(rect[1])),
                  (int(rect[0] + rect[2]), int(rect[1] + rect[3])),
                  (0, 0, 255), 2, cv2.LINE_AA)
    cv2.rectangle(image, (int(rect[0] - 1), int(rect[1]) - 16),
                  (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
                  cv2.LINE_AA)
    img = Image.fromarray(image)
    draw = ImageDraw.Draw(img)
    draw.text((int(rect[0] + 1), int(rect[1] - 16)),
              addText, (255, 255, 255),
              font=fontC)
    imagex = np.array(img)
    return imagex


import HyperLPRLite as pr
import cv2
import numpy as np
from PIL import Image, ImageDraw
image = grr = cv2.imread("data/test/15套车检_30010022722_T1.jpg")
model = pr.LPR("model/cascade.xml", "model/model12.h5",
               "model/ocr_plate_all_gru.h5")
for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
    #if confidence>0.7:
    image = drawRectBox(grr, rect, pstr + " " + str(round(confidence, 3)))
    print("plate_str:", pstr)
    print("plate_confidence", confidence)

cv2.imshow("image", image)
cv2.waitKey(0)

# SpeedTest("images_rec/2_.jpg")
Example No. 16
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Single rec demo')

    parser.add_argument('--detect_path', action='store', dest='detect_path')
    parser.add_argument('--cascade_model_path', action='store', default='model/cascade.xml')
    parser.add_argument('--mapping_vertical_model_path', action='store', default='model/model12.h5')
    parser.add_argument('--ocr_plate_model_path', action='store', default='model/ocr_plate_all_gru.h5')
    parser.add_argument('--save_result_flag', action='store', default='True')
    parser.add_argument('--plot_result_flag', action='store', default='True')
    parser.add_argument('--save_path', action='store', default=None)

    args = parser.parse_args()

    model = pr.LPR(args.cascade_model_path, args.mapping_vertical_model_path, args.ocr_plate_model_path)
    grr = cv2.imread(args.detect_path)
    t0 = time.time()
    image = grr
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > 0.7:
            pstr = pstr.encode('utf-8')
            image = drawRectBox(image, rect, pstr + " " + str(round(confidence, 3)))
            print("plate_str:")
            print(pstr)
            print("plate_confidence")
            print(confidence)
    t = time.time() - t0
    print("Image size: " + str(grr.shape[1]) + "x" + str(grr.shape[0]) + " need " + str(round(t * 1000, 2)) + "ms")

    if args.plot_result_flag == 'True' or args.plot_result_flag == 'true':
Example No. 17
tasks = Queue(maxsize=1024)

def drawRectBox(image,rect,addText):
    cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), (0,0, 255), 2,cv2.LINE_AA)
    cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
                  cv2.LINE_AA)
    img = Image.fromarray(image)
    draw = ImageDraw.Draw(img)
    draw.text((int(rect[0]+1), int(rect[1]-16)), addText.decode('utf-8'), (255, 255, 255), font=fontC)
    imagex = np.array(img)
    return imagex
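drawRectBox reads a module-level fontC that this snippet does not define; a minimal sketch of creating it with PIL's ImageFont, mirroring the ImageFont.truetype(font_, 14, 0) call in the webcamImagePub example above (the .ttf path is illustrative and should point to a font that covers the Chinese characters used on plates):

from PIL import ImageFont

fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0)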




model = pr.LPR('model/cascade.xml','model/model12.h5','model/ocr_plate_all_gru.h5')
def recognize(grr):
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > 0.7:
            print('plate_str: [%s], plate_confidence: [%.2f]' % (pstr.encode('utf-8'), confidence))
        else:
            print('unable to recognize the plate')


def usage():
    print('Usage:')
    print('identifyPlate.py [-h|--help]')
    print('identifyPlate.py -i|--image <IMAGE FILE>')
    print('')

def main(argv):
Example No. 18
import HyperLPRLite as pr
import cv2
import numpy as np
from PIL import Image, ImageDraw
import re
import gevent
from gevent import socket
from gevent.server import DatagramServer
from gevent.pool import Pool
from gevent import monkey

monkey.patch_all()
from alpr_config import root_dir
from alpr_logger import alpr_logger

model = pr.LPR(root_dir + 'model/cascade.xml', root_dir + 'model/model12.h5',
               root_dir + 'model/ocr_plate_all_gru.h5')


def recognize(imgFile):
    try:
        grr = cv2.imread(imgFile)
        for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
            if confidence > 0.8:
                alpr_logger.debug('plate_str: [%s], plate_confidence: [%.2f]' %
                                  (pstr.encode('utf-8'), confidence))
                return pstr
            else:
                alpr_logger.warning(
                    'Maybe wrong plate_str: [%s], plate_confidence: [%.2f]' %
                    (pstr.encode('utf-8'), confidence))
                return ''
    except Exception as e:
        # keep the server running if the image cannot be read or recognized
        alpr_logger.warning('recognize failed for [%s]: %s' % (imgFile, e))
        return ''
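The fragment above imports DatagramServer but does not include the server wiring. A minimal sketch, assuming each UDP datagram carries an image file path and the recognized plate is sent back (the class name, port, and encoding are illustrative):

class PlateServer(DatagramServer):
    def handle(self, data, address):
        img_file = data.decode('utf-8').strip()
        plate = recognize(img_file) or ''
        self.socket.sendto(plate.encode('utf-8'), address)

if __name__ == '__main__':
    PlateServer(':9090').serve_forever()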
Example No. 19
def drawRectBox(image, rect, addText):
    cv2.rectangle(image, (int(rect[0]), int(rect[1])),
                  (int(rect[0] + rect[2]), int(rect[1] + rect[3])),
                  (0, 0, 255), 2, cv2.LINE_AA)
    cv2.rectangle(image, (int(rect[0] - 1), int(rect[1]) - 16),
                  (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
                  cv2.LINE_AA)
    img = Image.fromarray(image)
    draw = ImageDraw.Draw(img)
    draw.text((int(rect[0]+1), int(rect[1]-16)), addText.decode("utf-8"), (255, 255, 255), font=fontC)
    imagex = np.array(img)
    return imagex





import HyperLPRLite as pr
import cv2
import numpy as np
from PIL import Image, ImageDraw
grr = cv2.imread("images_rec/21jk.jpg")
model = pr.LPR("model/cascade_lbp.xml","model/model12.h5","model/model.h5")
image = grr
for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
    if confidence > 0.7:
        image = drawRectBox(grr, rect, pstr + " " + str(round(confidence, 3)))
        print("plate_str:")
        print(pstr)
        print("plate_confidence")
        print(confidence)
        print("plate_borders:")
        print(rect)
            
cv2.imshow("image",image)
cv2.waitKey(0)


Example No. 20
def drawRectBox(image, rect, addText):
    cv2.rectangle(image, (int(rect[0]), int(rect[1])),
                  (int(rect[0] + rect[2]), int(rect[1] + rect[3])),
                  (0, 0, 255), 2, cv2.LINE_AA)
    cv2.rectangle(image, (int(rect[0] - 1), int(rect[1]) - 16),
                  (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
                  cv2.LINE_AA)
    img = Image.fromarray(image)
    draw = ImageDraw.Draw(img)
    draw.text((int(rect[0] + 1), int(rect[1] - 16)),
              addText, (255, 255, 255),
              font=fontC)
    imagex = np.array(img)
    return imagex


model = pr.LPR(os.path.join(BASE_PATH, "model/cascade.xml"),
               os.path.join(BASE_PATH, "model/model12.h5"),
               os.path.join(BASE_PATH, "model/ocr_plate_all_gru.h5"))

# SpeedTest("images_rec/2.jpg")


def demo():
    grr = cv2.imread(os.path.join(BASE_PATH, "images_rec/1.jpg"))
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(grr):
        if confidence > 0.7:
            # image = drawRectBox(grr, rect, pstr+" " +
            #                     str(round(confidence, 3)))
            print("plate_str:")
            print(pstr)
            print("plate_confidence")
            print(confidence)
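BASE_PATH is not defined in this fragment; a minimal sketch, assuming the model directory sits next to the script:

import os

BASE_PATH = os.path.dirname(os.path.abspath(__file__))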
Example No. 21
    def __init__(self):
        self.model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
        print('init')
Example No. 22
def init():
    global model
    print('Directory:', BASE_PATH)
    model = pr.LPR(os.path.join(BASE_PATH, "model", "cascade.xml"),
                   os.path.join(BASE_PATH, "model", "model12.h5"),
                   os.path.join(BASE_PATH, "model", "ocr_plate_all_gru.h5"))
Example No. 23
              addText, (255, 255, 255),
              font=fontC)
    imagex = np.array(img)
    return imagex


import HyperLPRLite as pr
import cv2
import numpy as np
from PIL import Image, ImageDraw

SpeedTest("images_rec/2_resized.jpg")

img = Image.open("images_rec/1_resized.jpg")
model = pr.LPR("export_graph/edgetpu_models/detection_model_edgetpu.tflite",
               "export_graph/model12.h5",
               "export_graph/edgetpu_models/ocr_model_edgetpu.tflite",
               cnn=True,
               tpu=True)
for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(img):
    #if confidence>0.1:
    image = drawRectBox(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR), rect,
                        pstr + " " + str(round(confidence, 3)))
    print("plate_str:")
    print(pstr)
    print("plate_confidence")
    print(confidence)

cv2.imshow("image", image)
cv2.waitKey(0)

cv2.destroyAllWindows()