Example #1
File: int.py  Project: itnoneedteach/alpr
def LPDection(carImagePath):
    wpod_net = load_model(wpod_net_path)
    print(carImagePath)
    Ivehicle = cv2.imread(carImagePath)
    ratio = float(max(Ivehicle.shape[:2])) / min(Ivehicle.shape[:2])
    side = int(ratio * 288.)
    bound_dim = min(side + (side % (2**4)), 608)

    Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(Ivehicle), bound_dim, 2**4,
                                (240, 80), lp_threshold)

    if len(LlpImgs):
        bname = basename(splitext(carImagePath)[0])
        dname = dirname(carImagePath)
        Ilp = LlpImgs[0]
        cv2.imwrite('%s/%s_lp_raw.png' % (dname, bname), Ilp * 255.)
        Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
        Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)

        s = Shape(Llp[0].pts)

        LPImagePath = '%s/%s_lp.png' % (dname, bname)
        cv2.imwrite(LPImagePath, Ilp * 255.)
        LPTextPath = '%s/%s_lp.txt' % (dname, bname)
        writeShapes(LPTextPath, [s])
        return LPImagePath
    else:
        return ""
Example #2
    def __init__(self):
        self.path_images = "."
        path_yolo = './'
        self.output_dir = './tmp/'
        self.vehicle = Vehicle(path_yolo)
        self.lp = LP(path_yolo)

        wpod_net_path = "models/wpod-net_update1.h5"
        self.wpod_net = load_model(wpod_net_path)
        self.lp_threshold = .5
Example #3
def load_network(modelpath,input_dim):
	model = load_model(modelpath)
	input_shape = (input_dim,input_dim,3)
	# Fixed input size for training
	inputs = keras.layers.Input(shape=(input_dim,input_dim,3))
	outputs = model(inputs)
	output_shape = tuple([s.value for s in outputs.shape[1:]])
	output_dim = output_shape[1]
	model_stride = input_dim / output_dim
	assert input_dim % output_dim == 0, \
		'The input resolution must be divisible by the output resolution'
	assert model_stride == 2**4, \
		'Make sure your model generates a feature map with resolution ' \
		'16x smaller than the input'
	return model, model_stride, input_shape, output_shape
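
The two assertions above reduce to plain integer arithmetic. A standalone sketch with illustrative values (no Keras required):

# Illustrative only: mirrors the stride checks in load_network above.
input_dim = 208    # hypothetical fixed training input size
output_dim = 13    # feature-map side a 16x-downsampling net would produce
assert input_dim % output_dim == 0, \
    'The input resolution must be divisible by the output resolution'
model_stride = input_dim // output_dim
assert model_stride == 2**4, 'expected a 16x-downsampling network'
print(model_stride)  # 16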
Example #4
	def __init__(self, config, det_thresh=0.5, rec_thresh=0.5):

		## --- update the versions below
		self.mod_version = "1.2"
		self.det_version = "1.2"
		self.rec_version = "1.1"
		## -------------------------

		if config.det_mod == "":
			return

		#self.consolidator = Consolidator()
		self.utils = Utils()

		self.det_model = config.det_mod
		self.rec_model = config.rec_mod
		self.det_thresh = det_thresh
		self.rec_thresh = rec_thresh

		self.cons_lp_enabled = config.cons_lp
		self.roi_det_enabled = config.do_roi_det

		self.ocr_weights = ""
		self.ocr_netcfg = ""
		self.ocr_dataset = ""

		self.ocr_net = None
		self.ocr_meta = None

		self.wpod_net = None

		self.prev_locs = None

		if self.det_model != "" and os.path.exists(self.det_model):
			det_model = [f for f in os.listdir(self.det_model) if ".h5" in f]
			self.det_model += "/"+det_model[0]

			print ("INFO:: Loding model: ", self.det_model)
			self.wpod_net = load_model(self.det_model)


			self.load_rec_model(self.rec_model)

		else:
			print ("\033[93m Warning: Specify detection model by giving \"--det-model <path>\" option.\033[0m")
Example #5
def __init__(self, wpod_lpd_net_path, modelo_deteccao_veiculos,
             modelo_deteccao_caracteres, caminho_char_recognition_resnet):
    self.graph_car_detect = tf.Graph()
    self.session_car_detection = tf.Session(graph=self.graph_car_detect)
    self.wpod_net = load_model(wpod_lpd_net_path)
    self.return_tensors_veichle = utils.read_pb_return_tensors(
        self.graph_car_detect, modelo_deteccao_veiculos, return_elements)
    char_dectection_model_pb_file = modelo_deteccao_caracteres + '.pb'
    char_dectection_model_meta_file = modelo_deteccao_caracteres + '.meta'
    options_char_detec = {
        "pbLoad": char_dectection_model_pb_file,
        "metaLoad": char_dectection_model_meta_file,
        "gpu": 0.9
    }
    self.yolo_char_seg_model = TFNet(options_char_detec)
    self.model_char_recog = keras.models.load_model(
        caminho_char_recognition_resnet)
    self.color_bbox_car = (255, 0, 0)
    self.color_text_plate = (0, 255, 0)
    self.ct = centroidtracker.CentroidTracker()
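
The dedicated tf.Graph/tf.Session pair above keeps the frozen car detector isolated from the Keras models loaded alongside it. A minimal sketch of that TF1-era pattern (illustrative; assumes TensorFlow 1.x, as the snippet does):

import tensorflow as tf  # TF1-style API, matching the snippet above

graph = tf.Graph()
sess = tf.Session(graph=graph)   # this session only runs ops from `graph`
with graph.as_default():
    x = tf.constant(2.0)
    y = x * 3.0
print(sess.run(y))               # 6.0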
Example #6
def load_network(modelpath, input_dim):
    # input_dim = 208
    model = load_model(modelpath)
    input_shape = (input_dim, input_dim, 3)

    # Fixed input size for training
    # Input size is 208*208
    inputs = keras.layers.Input(shape=(input_dim, input_dim, 3))
    outputs = model(inputs)

    output_shape = tuple([s.value for s in outputs.shape[1:]])
    output_dim = output_shape[1]
    model_stride = input_dim / output_dim
    # Check the condition; if it fails, bail out with an exception
    assert input_dim % output_dim == 0, \
        'The input resolution must be divisible by the output resolution'

    assert model_stride == 2**4, \
     'Make sure your model generates a feature map with resolution ' \
     '16x smaller than the input'

    return model, model_stride, input_shape, output_shape
Example #7
def exit_gate():
    def sample(probs):
        s = sum(probs)
        probs = [old_div(a,s) for a in probs]
        r = random.uniform(0, 1)
        for i in range(len(probs)):
            r = r - probs[i]
            if r <= 0:
                return i
        return len(probs)-1

    def c_array(ctype, values):
        arr = (ctype*len(values))()
        arr[:] = values
        return arr
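
    # Illustrative note (not from the original source): (ctype * n)()
    # allocates a zeroed C array of n elements, and the slice assignment
    # copies the Python values in; e.g. c_array(c_float, [0.1, 0.2, 0.3])
    # yields an array with arr[1] ~= 0.2 that can be passed to C functions
    # expecting a float*.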

    class BOX(Structure):
        _fields_ = [("x", c_float),
                    ("y", c_float),
                    ("w", c_float),
                    ("h", c_float)]

    class DETECTION(Structure):
        _fields_ = [("bbox", BOX),
                    ("classes", c_int),
                    ("prob", POINTER(c_float)),
                    ("mask", POINTER(c_float)),
                    ("objectness", c_float),
                    ("sort_class", c_int)]

    class IMAGE(Structure):
        _fields_ = [("w", c_int),
                    ("h", c_int),
                    ("c", c_int),
                    ("data", POINTER(c_float))]

    class METADATA(Structure):
        _fields_ = [("classes", c_int),
                    ("names", POINTER(c_char_p))]

    class IplROI(Structure):
        pass

    class IplTileInfo(Structure):
        pass

    class IplImage(Structure):
        pass

    IplImage._fields_ = [
        ('nSize', c_int),
        ('ID', c_int),
        ('nChannels', c_int),
        ('alphaChannel', c_int),
        ('depth', c_int),
        ('colorModel', c_char * 4),
        ('channelSeq', c_char * 4),
        ('dataOrder', c_int),
        ('origin', c_int),
        ('align', c_int),
        ('width', c_int),
        ('height', c_int),
        ('roi', POINTER(IplROI)),
        ('maskROI', POINTER(IplImage)),
        ('imageId', c_void_p),
        ('tileInfo', POINTER(IplTileInfo)),
        ('imageSize', c_int),
        ('imageData', c_char_p),
        ('widthStep', c_int),
        ('BorderMode', c_int * 4),
        ('BorderConst', c_int * 4),
        ('imageDataOrigin', c_char_p)]

    class iplimage_t(Structure):
        _fields_ = [('ob_refcnt', c_ssize_t),
                    ('ob_type',  py_object),
                    ('a', POINTER(IplImage)),
                    ('data', py_object),
                    ('offset', c_size_t)]

    lib = CDLL("./darknet/libdarknet.so", RTLD_GLOBAL)
    lib.network_width.argtypes = [c_void_p]
    lib.network_width.restype = c_int
    lib.network_height.argtypes = [c_void_p]
    lib.network_height.restype = c_int

    predict = lib.network_predict
    predict.argtypes = [c_void_p, POINTER(c_float)]
    predict.restype = POINTER(c_float)

    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]

    make_image = lib.make_image
    make_image.argtypes = [c_int, c_int, c_int]
    make_image.restype = IMAGE

    get_network_boxes = lib.get_network_boxes
    get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float,
                                  c_float, POINTER(c_int), c_int, POINTER(c_int)]
    get_network_boxes.restype = POINTER(DETECTION)

    make_network_boxes = lib.make_network_boxes
    make_network_boxes.argtypes = [c_void_p]
    make_network_boxes.restype = POINTER(DETECTION)

    free_detections = lib.free_detections
    free_detections.argtypes = [POINTER(DETECTION), c_int]

    free_ptrs = lib.free_ptrs
    free_ptrs.argtypes = [POINTER(c_void_p), c_int]

    network_predict = lib.network_predict
    network_predict.argtypes = [c_void_p, POINTER(c_float)]

    reset_rnn = lib.reset_rnn
    reset_rnn.argtypes = [c_void_p]

    load_net = lib.load_network
    load_net.argtypes = [c_char_p, c_char_p, c_int]
    load_net.restype = c_void_p

    do_nms_obj = lib.do_nms_obj
    do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

    do_nms_sort = lib.do_nms_sort
    do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

    free_image = lib.free_image
    free_image.argtypes = [IMAGE]

    letterbox_image = lib.letterbox_image
    letterbox_image.argtypes = [IMAGE, c_int, c_int]
    letterbox_image.restype = IMAGE

    load_meta = lib.get_metadata
    lib.get_metadata.argtypes = [c_char_p]
    lib.get_metadata.restype = METADATA

    load_image = lib.load_image_color
    load_image.argtypes = [c_char_p, c_int, c_int]
    load_image.restype = IMAGE

    rgbgr_image = lib.rgbgr_image
    rgbgr_image.argtypes = [IMAGE]

    predict_image = lib.network_predict_image
    predict_image.argtypes = [c_void_p, IMAGE]
    predict_image.restype = POINTER(c_float)

    def classify(net, meta, im):
        out = predict_image(net, im)
        res = []
        for i in range(meta.classes):
            res.append((meta.names[i], out[i]))
        res = sorted(res, key=lambda x: -x[1])
        return res

    def array_to_image(arr):
        # need to return old values to avoid python freeing memory
        arr = arr.transpose(2, 0, 1)
        c, h, w = arr.shape[0:3]
        arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
        #print(arr)
        data = arr.ctypes.data_as(POINTER(c_float))
        im = IMAGE(w, h, c, data)
        return im, arr
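
    # Illustrative note (not from the original source): array_to_image
    # returns the flattened numpy buffer alongside the IMAGE struct on
    # purpose. IMAGE.data is a raw pointer into that buffer, so the caller
    # must keep the returned array referenced; if only `im` were returned,
    # Python could garbage-collect the buffer the pointer refers to.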

    def detect(net, meta, image, thresh, hier_thresh=.5, nms=.45):

        im, image = array_to_image(image)
        rgbgr_image(im)
        num = c_int(0)
        pnum = pointer(num)
        predict_image(net, im)
        dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
        num = pnum[0]
        if nms:
            do_nms_obj(dets, num, meta.classes, nms)

        res = []
        for j in range(num):
            a = dets[j].prob[0:meta.classes]
            if any(a):
                ai = np.array(a).nonzero()[0]
                for i in ai:
                    b = dets[j].bbox
                    res.append((meta.names[i], dets[j].prob[i],
                               (b.x, b.y, b.w, b.h)))

        res = sorted(res, key=lambda x: -x[1])
        wh = (im.w, im.h)
        if isinstance(image, bytes):
            free_image(im)
        free_detections(dets, num)
        return res
    def cdetect(net, meta, image, thresh=0.3, hier_thresh=.7, nms=.45):

        im, image = array_to_image(image)
        rgbgr_image(im)
        num = c_int(0)
        pnum = pointer(num)
        predict_image(net, im)
        dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
        num = pnum[0]
        if nms:
            do_nms_obj(dets, num, meta.classes, nms)

        res = []
        for j in range(num):
            a = dets[j].prob[0:meta.classes]
            if any(a):
                ai = np.array(a).nonzero()[0]
                for i in ai:
                    b = dets[j].bbox
                    res.append((meta.names[i], dets[j].prob[i],
                               (b.x, b.y, b.w, b.h)))

        res = sorted(res, key=lambda x: -x[1])
        wh = (im.w, im.h)
        if isinstance(image, bytes):
            free_image(im)
        free_detections(dets, num)
        return res


    def adjust_pts(pts, lroi):
        return pts*lroi.wh().reshape((2, 1)) + lroi.tl().reshape((2, 1))
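
    # Illustrative note (not from the original source): adjust_pts maps
    # normalized 2xN corner points into the parent ROI's frame. E.g. with
    # lroi.wh() = [200, 100] and lroi.tl() = [50, 40], the point (0.1, 0.2)
    # maps to (0.1*200 + 50, 0.2*100 + 40) = (70, 60).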


    def text_extract(img_path, height, width):
        '''gray = cv2.cvtColor(img_path, cv2.COLOR_BGR2GRAY)

        gray = cv2.threshold(
            gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

        inv = 255 - gray
        horizontal_img = inv
        vertical_img = inv

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (100, 1))
        horizontal_img = cv2.erode(horizontal_img, kernel, iterations=1)
        horizontal_img = cv2.dilate(horizontal_img, kernel, iterations=1)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 100))
        vertical_img = cv2.erode(vertical_img, kernel, iterations=1)
        vertical_img = cv2.dilate(vertical_img, kernel, iterations=1)

        mask_img = horizontal_img + vertical_img

        no_border = np.bitwise_or(gray, mask_img)
        os.system("convert " + " no.jpg" + " -bordercolor " +
                  " White" + " -border" + " 10x10" + " with_border.jpg")
        imagez = cv2.imread("with_border.jpg")
        p1 = (20, 30)
        p2 = (60, 80)
        p3 = (60, 15)
        p4 = (200, 45)
        (x1, y1) = (20, 30)
        (x2, y2) = (60, 80)
        (x3, y3) = (60, 15)
        (x4, y4) = (200, 45)

        color = (255, 0, 0)
        thickness = 2
        image1 = cv2.rectangle(imagez, p1, p2, color, thickness)
        image2 = cv2.rectangle(imagez, p3, p4, color, thickness)

        roi1 = image1[y1:y2, x1:x2]

        roi2 = image2[y3:y4, x3:x4]'''

        ocr_threshold = .4
        R = detect(
            ocr_net, ocr_meta, img_path, thresh=ocr_threshold, nms=None)
        if len(R):
            L = dknet_label_conversion(R, width, height)
            L = nms(L, .45)
            L.sort(key=lambda x: x.tl()[0])
            lp_str1 = ''.join([chr(l.cl()) for l in L])
            # Print only when characters were found; lp_str1 is undefined otherwise
            print("License plate ----", lp_str1)

    def lp_detector(Ivehicle, wpod_net):

        lp_threshold = .2
        ratio = float(max(Ivehicle.shape[:2]))/min(Ivehicle.shape[:2])
        side = int(ratio*288.)
        bound_dim = min(side + (side%(2**4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(
            Ivehicle), bound_dim, 2**4, (240, 80), lp_threshold)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
            cv2.imwrite("lp1.png", Ilp*255.)
        return len(LlpImgs)


    def car_detect(Iorig):
        Lcars=[]
        R = []
        vehicle_threshold = .5
        R = detect(vehicle_net, vehicle_meta, Iorig, thresh=vehicle_threshold)

        # Darknet returns class names as bytes, so compare against bytes
        R = [r for r in R if r[0] in [b'car', b'bus']]
        #print('\t\t%d cars found' % len(R))
        if len(R):
            WH = np.array(Iorig.shape[1::-1],dtype=float)
            Lcars = []
            for i,r in enumerate(R):
                cx,cy,w,h = (old_div(np.array(r[2]),np.concatenate( (WH,WH) ))).tolist()
                tl = np.array([cx - w/2., cy - h/2.])
                br = np.array([cx + w/2., cy + h/2.])
                label = Label(0,tl,br)
                Icar = crop_region(Iorig,label)
                cv2.imwrite("crop1.png",Icar)
                Icar1 = cv2.imread("crop1.png")
                Lcars.append(label)
        return Lcars
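
    # Illustrative note (not from the original source): car_detect converts
    # darknet's center-format boxes to corner form in relative coordinates.
    # With WH = [640, 480] and a raw box (320, 240, 100, 50), normalization
    # gives (cx, cy, w, h) = (0.5, 0.5, 0.15625, 0.1042), so
    # tl ~ (0.422, 0.448) and br ~ (0.578, 0.552), which crop_region then
    # scales back to pixel coordinates.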

    ocr_weights = 'data/exit-ocr/ocr-net.weights'
    ocr_netcfg = 'data/exit-ocr/ocr-net.cfg'
    ocr_dataset = 'data/exit-ocr/ocr-net.data'
    ocr_net = load_net(ocr_netcfg.encode('utf-8'), ocr_weights.encode('utf-8'), 0)
    ocr_meta = load_meta(ocr_dataset.encode('utf-8'))
    wpod_net = load_model("data/exit-lp-detector/wpod-net_update1.h5")
    vehicle_threshold = .5
    vehicle_weights = 'data/exit-vehicle-detector/yolov3-tiny.weights'
    vehicle_netcfg = 'data/exit-vehicle-detector/yolov3-tiny.cfg'
    vehicle_dataset = 'data/exit-vehicle-detector/coco.data'
    vehicle_net = load_net(vehicle_netcfg.encode('utf-8'), vehicle_weights.encode('utf-8'), 0)
    vehicle_meta = load_meta(vehicle_dataset.encode('utf-8'))
    countd = 1
    img = cv2.imread("ar1.jpg")

    start_veh_det = time.time()
    lab=car_detect(img)
    end_veh_det = time.time()
    veh_det_time = end_veh_det - start_veh_det
    print("veh-det time(exit) :- ", veh_det_time)
    if len(lab) != 0:
        for i in range(len(lab)):
            # LP detection
            start_lp_det = time.time()
            crop1=cv2.imread("crop1.png")
            cv2.imshow("Car 1", crop1)
            lp = lp_detector(crop1, wpod_net)
            end_lp_det = time.time()
            lp_det_time = end_lp_det - start_lp_det
            print("lp-det time(exit) :- ",lp_det_time)
            lp = cv2.imread("lp1.png")
            cv2.imshow("LP 1", lp)
            height = lp.shape[0]
            width = lp.shape[1]

            # OCR
            start_ocr = time.time()
            text = text_extract(lp, height, width)
            end_ocr = time.time()
            ocr_time = end_ocr - start_ocr
            print("ocr time (exit) :- ",ocr_time)
            cv2.waitKey(10)
Example #8
if __name__ == '__main__':
    try:
        input_file = sys.argv[1]
        output_file = sys.argv[2]
        # vehicle detection model
        vehicle_threshold = .5

        vehicle_weights = b'darknetAB/yolov3.weights'
        vehicle_netcfg = b'darknetAB/cfg/yolov3.cfg'
        vehicle_dataset = b'darknetAB/cfg/coco.data'

        vehicle_net = dn.load_net_custom(vehicle_netcfg, vehicle_weights, 0, 1)  # batchsize=1
        vehicle_meta = dn.load_meta(vehicle_dataset)

        # license plate detection model
        wpod_net = load_model('data/lp-detector/wpod-net_update1.h5')

        # license plate recognition model
        ocrmodel = LPR("data/ocr-model/ocr_plate_all_gru.h5")

        # Create an image we reuse for each detect
        vid = cv2.VideoCapture(input_file)
        video_width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        video_height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        darknet_image = dn.make_image(int(video_width),int(video_height), 3)
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        videoWriter = cv2.VideoWriter(output_file, fourcc, int(video_fps),
                                      (int(video_width),int(video_height)))
        print('Searching for vehicles and licenses using YOLO and Keras...')
        frame = 1
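
The capture/writer setup above is the standard OpenCV round-trip. A minimal standalone sketch (file paths are illustrative, not from the project):

import cv2

# Illustrative only: mirrors the VideoCapture/VideoWriter setup above.
vid = cv2.VideoCapture("input.mp4")  # hypothetical input path
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = vid.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter("output.mp4", fourcc, int(fps), (width, height))
ok, frame = vid.read()
if ok:
    writer.write(frame)  # copy one frame through unchanged
vid.release()
writer.release()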
Example #9
def exit_gate():
    def sample(probs):
        # Normalize, then walk the cumulative distribution with one draw
        probs = [old_div(a, sum(probs)) for a in probs]
        r = random.uniform(0, 1)
        for i in range(len(probs)):
            r = r - probs[i]
            if r <= 0:
                return i
        return len(probs) - 1

    def c_array(ctype, values):
        arr = (ctype * len(values))()
        arr[:] = values
        return arr

    class BOX(Structure):
        _fields_ = [("x", c_float), ("y", c_float), ("w", c_float),
                    ("h", c_float)]

    class DETECTION(Structure):
        _fields_ = [("bbox", BOX), ("classes", c_int),
                    ("prob", POINTER(c_float)), ("mask", POINTER(c_float)),
                    ("objectness", c_float), ("sort_class", c_int)]

    class IMAGE(Structure):
        _fields_ = [("w", c_int), ("h", c_int), ("c", c_int),
                    ("data", POINTER(c_float))]

    class METADATA(Structure):
        _fields_ = [("classes", c_int), ("names", POINTER(c_char_p))]

    class IplROI(Structure):
        pass

    class IplTileInfo(Structure):
        pass

    class IplImage(Structure):
        pass

    IplImage._fields_ = [('nSize', c_int), ('ID', c_int), ('nChannels', c_int),
                         ('alphaChannel', c_int), ('depth', c_int),
                         ('colorModel', c_char * 4),
                         ('channelSeq', c_char * 4), ('dataOrder', c_int),
                         ('origin', c_int), ('align', c_int), ('width', c_int),
                         ('height', c_int), ('roi', POINTER(IplROI)),
                         ('maskROI', POINTER(IplImage)), ('imageId', c_void_p),
                         ('tileInfo', POINTER(IplTileInfo)),
                         ('imageSize', c_int), ('imageData', c_char_p),
                         ('widthStep', c_int), ('BorderMode', c_int * 4),
                         ('BorderConst', c_int * 4),
                         ('imageDataOrigin', c_char_p)]

    class iplimage_t(Structure):
        _fields_ = [('ob_refcnt', c_ssize_t), ('ob_type', py_object),
                    ('a', POINTER(IplImage)), ('data', py_object),
                    ('offset', c_size_t)]

    lib = CDLL("./darknet/libdarknet.so", RTLD_GLOBAL)
    lib.network_width.argtypes = [c_void_p]
    lib.network_width.restype = c_int
    lib.network_height.argtypes = [c_void_p]
    lib.network_height.restype = c_int

    predict = lib.network_predict
    predict.argtypes = [c_void_p, POINTER(c_float)]
    predict.restype = POINTER(c_float)

    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]

    make_image = lib.make_image
    make_image.argtypes = [c_int, c_int, c_int]
    make_image.restype = IMAGE

    get_network_boxes = lib.get_network_boxes
    get_network_boxes.argtypes = [
        c_void_p, c_int, c_int, c_float, c_float,
        POINTER(c_int), c_int,
        POINTER(c_int)
    ]
    get_network_boxes.restype = POINTER(DETECTION)

    make_network_boxes = lib.make_network_boxes
    make_network_boxes.argtypes = [c_void_p]
    make_network_boxes.restype = POINTER(DETECTION)

    free_detections = lib.free_detections
    free_detections.argtypes = [POINTER(DETECTION), c_int]

    free_ptrs = lib.free_ptrs
    free_ptrs.argtypes = [POINTER(c_void_p), c_int]

    network_predict = lib.network_predict
    network_predict.argtypes = [c_void_p, POINTER(c_float)]

    reset_rnn = lib.reset_rnn
    reset_rnn.argtypes = [c_void_p]

    load_net = lib.load_network
    load_net.argtypes = [c_char_p, c_char_p, c_int]
    load_net.restype = c_void_p

    do_nms_obj = lib.do_nms_obj
    do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

    do_nms_sort = lib.do_nms_sort
    do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

    free_image = lib.free_image
    free_image.argtypes = [IMAGE]

    letterbox_image = lib.letterbox_image
    letterbox_image.argtypes = [IMAGE, c_int, c_int]
    letterbox_image.restype = IMAGE

    load_meta = lib.get_metadata
    lib.get_metadata.argtypes = [c_char_p]
    lib.get_metadata.restype = METADATA

    load_image = lib.load_image_color
    load_image.argtypes = [c_char_p, c_int, c_int]
    load_image.restype = IMAGE

    rgbgr_image = lib.rgbgr_image
    rgbgr_image.argtypes = [IMAGE]

    predict_image = lib.network_predict_image
    predict_image.argtypes = [c_void_p, IMAGE]
    predict_image.restype = POINTER(c_float)

    def classify(net, meta, im):
        out = predict_image(net, im)
        res = []
        for i in range(meta.classes):
            res.append((meta.names[i], out[i]))
        return sorted(res, key=lambda x: -x[1])

    def array_to_image(arr):
        # need to return old values to avoid python freeing memory
        arr = arr.transpose(2, 0, 1)
        c, h, w = arr.shape[0:3]
        arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
        data = arr.ctypes.data_as(POINTER(c_float))
        im = IMAGE(w, h, c, data)
        return im, arr

    def detect(net, meta, image, thresh, hier_thresh=.5, nms=.45):

        im, image = array_to_image(image)
        rgbgr_image(im)
        pnum = pointer(c_int(0))
        predict_image(net, im)
        dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0,
                                 pnum)
        num = pnum[0]
        if nms:
            do_nms_obj(dets, num, meta.classes, nms)

        res = []
        for j in range(num):
            if any(dets[j].prob[0:meta.classes]):
                ai = np.array(dets[j].prob[0:meta.classes]).nonzero()[0]
                for i in ai:
                    b = dets[j].bbox
                    res.append(
                        (meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))

        res = sorted(res, key=lambda x: -x[1])
        wh = (im.w, im.h)
        if isinstance(image, bytes):
            free_image(im)
        free_detections(dets, num)
        return res

    def adjust_pts(pts, lroi):
        return pts * lroi.wh().reshape((2, 1)) + lroi.tl().reshape((2, 1))

    def text_extract(img_path, height, width):
        R = detect(ocr_net, ocr_meta, img_path, thresh=0.4, nms=None)
        if len(R):
            L = dknet_label_conversion(R, width, height)
            L = nms(L, .45)
            L.sort(key=lambda x: x.tl()[0])
            lp_str1 = ''.join([chr(l.cl()) for l in L])
            # Print only when characters were found; lp_str1 is undefined otherwise
            print("License plate ----", lp_str1)

    def lp_detector(Ivehicle, wpod_net):

        lp_threshold = .2
        ratio = float(max(Ivehicle.shape[:2])) / min(Ivehicle.shape[:2])
        side = int(ratio * 288.)
        bound_dim = min(side + (side % (2**4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(Ivehicle), bound_dim,
                                    2**4, (240, 80), lp_threshold)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            cv2.imwrite("lp1.png", Ilp * 255.)
            return Ilp * 255.
        return None  # Ilp is undefined here when no plate is found

    def car_detect(Iorig):
        Lcars = []
        R = []
        R = detect(vehicle_net, vehicle_meta, Iorig, thresh=0.5)

        # Darknet returns class names as bytes, so compare against bytes
        R = [r for r in R if r[0] in [b'car', b'bus']]

        if len(R):
            WH = np.array(Iorig.shape[1::-1], dtype=float)
            Lcars = []
            cars_crop = []
            for i, r in enumerate(R):
                cx, cy, w, h = (old_div(np.array(r[2]), np.concatenate(
                    (WH, WH)))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label = Label(0, tl, br)
                Icar = crop_region(Iorig, label)
                cv2.imwrite("crop1.png", Icar)
                Icar1 = cv2.imread("crop1.png")
                Lcars.append(label)
                cars_crop.append(Icar1)
            return Lcars, cars_crop
        return [], []  # keep the two-value contract when nothing is found

    ocr_weights = 'data/exit-ocr/ocr-net.weights'
    ocr_netcfg = 'data/exit-ocr/ocr-net.cfg'
    ocr_dataset = 'data/exit-ocr/ocr-net.data'
    ocr_net = load_net(ocr_netcfg.encode('utf-8'), ocr_weights.encode('utf-8'),
                       0)
    ocr_meta = load_meta(ocr_dataset.encode('utf-8'))
    wpod_net = load_model("data/exit-lp-detector/wpod-net_update1.h5")
    vehicle_threshold = .5
    vehicle_weights = 'data/exit-vehicle-detector/yolov3-tiny.weights'
    vehicle_netcfg = 'data/exit-vehicle-detector/yolov3-tiny.cfg'
    vehicle_dataset = 'data/exit-vehicle-detector/coco.data'
    vehicle_net = load_net(vehicle_netcfg.encode('utf-8'),
                           vehicle_weights.encode('utf-8'), 0)
    vehicle_meta = load_meta(vehicle_dataset.encode('utf-8'))
    countd = 1
    lab = []
    cars = []
    img = cv2.imread("ar1.jpg")
    start_veh_det = time.time()
    lab, cars = car_detect(img)
    end_veh_det = time.time()
    veh_det_time = end_veh_det - start_veh_det
    print("veh-det time(exit) :- ", veh_det_time)
    if len(cars) != 0:
        for i in range(len(cars)):
            # LP detection
            start_lp_det = time.time()
            cv2.imshow("Car" + str(i), cars[i])
            lp = lp_detector(cars[i], wpod_net)
            end_lp_det = time.time()
            lp_det_time = end_lp_det - start_lp_det
            print("lp-det time(exit) :- ", lp_det_time)
            cv2.imshow("LP" + str(i), lp / 255.)
            height = lp.shape[0]
            width = lp.shape[1]

            # OCR
            start_ocr = time.time()
            text = text_extract(lp, height, width)
            end_ocr = time.time()
            ocr_time = end_ocr - start_ocr
            print("ocr time (exit) :- ", ocr_time)
            cv2.waitKey(10)
Example #10
# Custom code to handle memory allocation problem
# config = tf.ConfigProto()
# dynamically grow GPU memory
# config.gpu_options.allow_growth = True
# set_session(tf.Session(config=config))

import tensorflow as tf
from flask import Flask
from os.path import join

from src.keras_utils import load_model
from gen_outputs import generate_outputs
from stdout_capture import Capturing
from license_plate_ocr import lp_ocr
from license_plate_detection import detect

# Necessary for some thread-safety handling; haven't really looked into it
global graph
graph = tf.get_default_graph()
WPOD_NET = load_model("./data/lp-detector/wpod-net_update1.h5")

app = Flask(__name__)

# RUN_SCRIPT_FOLDER = "/alpr-unconstrained"
CASE_FOLDER_NAMES = ["0_case", "1_case", "2_case"]
MAIN_FOLDER_PATH = "/mnt"
# MAIN_FOLDER_PATH = "/home/atoth/temp/generali/prod_test/test"


@app.route('/lpr/<session_id>')
def run_lpr(session_id):
    # base_wd = os.getcwd()

    processed_images_folder = join(MAIN_FOLDER_PATH, session_id,
                                   "processed_images")
Example #11
def __init__(self, lp_model, threshold=0.7):
    self.wpod_net = load_model(lp_model)
    self.output_dir = 'output'
    self.detected_cars_dir = "detected_cars"
    self.detected_plates_dir = "detected_plates"
    self.lp_threshold = threshold
Example #12
	def __init__(self, lp_model):
		self.wpod_net = load_model(lp_model)
Example #13
def validate_model(wpod_net_path, validate_dir, output_dir):
    wpod_net = load_model(wpod_net_path)
    for layer in wpod_net.layers:
        print(layer.output_shape)
    validar_lp_model(validate_dir, output_dir, wpod_net)
Example #14
File: detect.py  Project: hy-xiong/alpr_vid
def load_lp_net(lp_net_path):
    return load_model(lp_net_path)
Example #15
def __init__(self, wpod_net_path="data/lp-detector/wpod-net_update1.h5", lp_threshold=.5):
    self.wpod_net = load_model(wpod_net_path)
    self.lp_threshold = lp_threshold
Example #16
    args = parser.parse_args()

    netname = basename(args.name)
    train_dir = args.input_dir
    modelpath = args.model
    outdir = args.outdir

    iterations = args.iterations
    batch_size = args.batch_size
    dim = 208

    if not isdir(outdir):
        makedirs(outdir)

    if modelpath:
        model = load_model(modelpath)
    else:
        model = create_model()

    model_stride = 2**4

    model.compile(loss=loss, optimizer='adam')

    print('Checking input directory...')
    Files = image_files_from_folder(train_dir)

    Data = []
    for file in Files:
        labfile = splitext(file)[0] + '.txt'
        if isfile(labfile):
            L = readShapes(labfile)
Example #17
if __name__ == '__main__':

    try:

        # input_dir  = sys.argv[1]
        input_dir = "/home/nam/Desktop/folder/alpr-unconstrained-master/samples/t1"
        # input_dir = "/home/nam/Desktop/folder/data/car_long"
        # input_dir = "/home/nam/Desktop/folder/data/GreenParking"
        output_dir = input_dir

        lp_threshold = .5

        # wpod_net_path = sys.argv[2]
        # wpod_net_path = "/home/nam/Desktop/folder/alpr-unconstrained-master/data/lp-detector/wpod-net_update1.h5"
        wpod_net_path = "/home/nam/Desktop/folder/alpr-unconstrained-master/models/eccv-model-scracth.h5"
        wpod_net = load_model(wpod_net_path)

        # imgs_paths = glob('%s/*car.png' % input_dir)
        imgs_paths = input_dir
        print('Searching for license plates using WPOD-NET')

        # for i,img_path in enumerate(imgs_paths):
        for root, dirs, files in os.walk(imgs_paths):
            for img_path in files:
                print('\t Processing %s' % img_path)
                img_path = os.path.join(imgs_paths, img_path)
                bname = splitext(basename(img_path))[0]
                Ivehicle = cv2.imread(img_path)
                im = cv2.resize(Ivehicle, (900, 500))
                cv2.imshow("asd", im)
                #cv2.waitKey(0)
Example #18
    unq_ids = ['id' + str(i) for i in range(100)]
    track_id_list = deque(unq_ids)
    #track_id_list= deque(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K','L','M','N','O'])
    avg_fps = 0

    debug = False
    interestedClass = ["car", "truck", "bus",
                       "train"]  # Interested classes for yolo
    #Initialize yolo detector
    yolo_det = YoloDetector(interestedClass, size=size)
    #Initialize ReID model
    reid_model_path = os.path.abspath('ReID_CNN/model_880_base.ckpt')
    reid_model = ResNet_Loader(reid_model_path, 50, 32)
    # Initialize license plate detector
    lp_model = "data/lp-detector/wpod-net_update1.h5"
    wpod_net = load_model(lp_model)
    #License Plate detection threshold
    lp_threshold = 0.8

    # test on a video file.
    input_file = args.input
    output_dir = args.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    input_basename = os.path.basename(input_file)
    detected_plates_dir = output_dir + '/' + 'license_plates'
    detected_cars_dir = output_dir + '/' + 'cars'
    if not os.path.exists(detected_plates_dir):
        os.makedirs(detected_plates_dir)
    if not os.path.exists(detected_cars_dir):