Example #1
def do_fr_after_YOLO(imgs_paths):
    output_dir = 'output_txt/fr_after_YOLO_kr'
    # using YOLOv3 trained on COCO
    YOLO_weights = 'data/vehicle-detector/yolov3.weights'
    YOLO_netcfg = 'data/vehicle-detector/yolov3.cfg'
    YOLO_data = 'data/vehicle-detector/coco.data'

    print 'YOLOv3 weights pre-loading...'
    YOLO_net = dn.load_net(YOLO_netcfg, YOLO_weights, 0)
    YOLO_meta = dn.load_meta(YOLO_data)
    threshold = 0.5

    for img_path in imgs_paths:
        print 'detecting cars in', img_path
        img = cv2.imread(img_path)
        results, wh = dn.detect(YOLO_net, YOLO_meta, img, threshold)
        txt_file = open(
            join(output_dir,
                 basename(splitext(img_path)[0]) + '.txt'), 'w')
        if len(results) == 0:
            txt_file.close()  # leave an empty txt file for images with no detections
            continue
        for result in results:
            if result[0] in ['car', 'bus']:
                WH = np.array(img.shape[1::-1], dtype=float)
                cx, cy, w, h = (np.array(result[2]) / np.concatenate(
                    (WH, WH))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label_sub = Label(tl=tl, br=br)
                sub_img = crop_region(img, label_sub)

                # sub_image FRD, only process the highest prob one
                print '\tFRD processing...'
                frd, _ = dn.detect(FR_net, FR_meta, sub_img, threshold)
                if len(frd) == 0:
                    continue
                WH_sub = np.array(sub_img.shape[1::-1], dtype=float)
                cx, cy, w, h = (np.array(frd[0][2]) / np.concatenate(
                    (WH_sub, WH_sub))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label_FR = Label(tl=tl, br=br)
                label_scale_up = Label(
                    tl=label_sub.tl() * WH + label_FR.tl() * WH_sub,
                    br=label_sub.tl() * WH + label_FR.br() * WH_sub)
                tl = label_scale_up.tl().astype(int)
                br = label_scale_up.br().astype(int)
                txt_file.write(frd[0][0] + ' ' + str('%.2f' % frd[0][1]) +
                               ' ' + str(tl[0]) + ' ' + str(tl[1]) + ' ' +
                               str(br[0]) + ' ' + str(br[1]) + '\n')
                print '\twrote result to', join(
                    output_dir,
                    basename(splitext(img_path)[0]) + '.txt')
        txt_file.close()
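Nearly every example on this page repeats the same conversion from darknet's (center x, center y, width, height) box to relative top-left/bottom-right corners. A minimal standalone sketch of that step (plain NumPy, no project helpers; the function name is mine):

import numpy as np

def box_to_corners(cx, cy, w, h, img_w, img_h):
    # Normalize the darknet (cx, cy, w, h) box by the image size, then
    # convert it to relative top-left / bottom-right corners in [0, 1].
    WH = np.array([img_w, img_h], dtype=float)
    center = np.array([cx, cy]) / WH
    half = np.array([w, h]) / WH / 2.
    return center - half, center + half

# e.g. a 100x50 box centered at (320, 240) in a 640x480 image
tl, br = box_to_corners(320, 240, 100, 50, 640, 480)
print(tl, br)  # [0.421875 0.4479...] [0.578125 0.5520...]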
Example #2
File: int.py, Project: itnoneedteach/alpr
def OCRDection(LPImagePath):
    # OCR recognition
    W, (width, height) = detect(ocr_net,
                                ocr_meta,
                                LPImagePath,
                                thresh=ocr_threshold,
                                nms=None)
    if len(W):
        L = dknet_label_conversion(W, width, height)
        L = nms(L, .45)
        L.sort(key=lambda x: x.tl()[0])
        lp_str = ''.join([chr(l.cl()) for l in L])

        # plates always upper case, do not contain "I", "O" or "Q"
        lp_str = lp_str.upper()
        lp_str = lp_str.replace("I", "1")
        lp_str = lp_str.replace("O", "0")
        lp_str = lp_str.replace("Q", "0")

        bname = basename(splitext(LPImagePath)[0])
        dname = dirname(LPImagePath)
        LPTextPath = '%s/%s_str.txt' % (dname, bname)

        with open(LPTextPath, 'w') as f:
            f.write(lp_str + '\n')
        return lp_str
    else:
        return ""
Example #3
    def detect_vehicles(self, img_path):
        vehicle_threshold = 0.5
        classes = ['car', 'bus', 'truck', 'motorcycle']

        results, _ = dn.detect(self.vehicle_net,
                               self.vehicle_meta,
                               img_path,
                               thresh=vehicle_threshold)
        results = [
            result for result in results
            if result[0].decode('utf-8') in classes
        ]
        vehicles = []

        if len(results):
            image = cv2.imread(img_path)
            WH = np.array(image.shape[1::-1], dtype=float)

            for i, result in enumerate(results):
                cx, cy, w, h = (np.array(result[2]) / np.concatenate(
                    (WH, WH))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label = Label(0, tl, br)
                vehicle = crop_region(image, label)

                results[i] = (result[0], result[1], result[2], vehicle)
                vehicles.append(label)

        return vehicles
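crop_region is a helper from the ALPR codebase. With the relative corners computed above, an equivalent crop is plain array slicing; a sketch assuming corners already clamped to [0, 1]:

import numpy as np

def crop_relative(image, tl, br):
    # image is HxWxC; tl/br are (x, y) pairs in [0, 1] relative coordinates
    h, w = image.shape[:2]
    x0, y0 = int(tl[0] * w), int(tl[1] * h)
    x1, y1 = int(br[0] * w), int(br[1] * h)
    return image[y0:y1, x0:x1]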
Example #4
def video_stream():
    global video_camera, net, meta, data
    count = 0
    alert_classes = []  # target classes
    alert_classes += accept_cls
    current = datetime.now()
    if video_camera is None:
        video_camera = VideoCamera(camera=camera, alert_classes=alert_classes)
    while True:
        status = 0
        count += 1
        img = video_camera.get_frame(byte=False)
        if img is not None:
            detected_objects = detect(net, meta, img, thresh=thresh)
            for obj, confidence, rect in detected_objects:
                detected_class = obj.decode('utf-8')
                status = pgm[detected_class]
            frame, is_alert = video_camera.draw_yolo(
                detected_objects=detected_objects)
            data = {
                'frame': frame,
                'camera_id': 0,  # fixed camera id (Int)
                'is_alert': is_alert,
            }
            if datetime.now() > (current + timedelta(seconds=1)):
                data['fps'] = count
                count = 0
                current = datetime.now()
            zmq_socket.send_pyobj(data)
            sio.emit('class', status)
Example #5
    def on_any_event(event):
        if event.is_directory:
            return None

        elif event.event_type == 'created':
            # Take any action here when a file is first created.
            print("Received created event - %s." % event.src_path)
            try:
                img_path = event.src_path
                print('\tScanning %s' % img_path)

                bname = basename(splitext(img_path)[0])

                R, _ = detect(vehicle_net,
                              vehicle_meta,
                              img_path,
                              thresh=lp_threshold)

                R = [r for r in R if r[0] in ['vehicle registration plate']]

                print('\t\t%d plates found' % len(R))

                if len(R):

                    Iorig = cv2.imread(img_path)
                    WH = np.array(Iorig.shape[1::-1], dtype=float)
                    Lcars = []

                    for i, r in enumerate(R):
                        cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                            (WH, WH))).tolist()
                        tl = np.array([cx - w / 2., cy - h / 2.])
                        br = np.array([cx + w / 2., cy + h / 2.])
                        label = Label(0, tl, br)
                        Icar = crop_region(Iorig, label)

                        Lcars.append(label)
                        height, width, _ = Icar.shape
                        print(height, width)
                        if height > 24:
                            cv2.imwrite(
                                '%s/%s_%dplate.png' % (output_dir, bname, i),
                                Icar)
                        else:
                            print(
                                'plate likely too small to OCR, high chance of FP'
                            )
                            if os.path.exists(img_path):
                                os.remove(img_path)
                else:
                    if os.path.exists(img_path):
                        os.remove(img_path)

            except:
                traceback.print_exc()
                sys.exit(1)

        elif event.event_type == 'modified':
            # Take any action here when a file is modified.
            print("Received modified event - %s." % event.src_path)
Example #6
File: detect.py, Project: hy-xiong/alpr_vid
def find_vehicle_one_img(img_path, veh_net, veh_meta, out_dir, veh_thd):
    st = time.time()
    print '\tScanning %s' % img_path
    bname = basename(splitext(img_path)[0])
    R, _ = dn.detect(veh_net, veh_meta, img_path, thresh=veh_thd)
    # R: [name, prob, [x center, y center, width, height]]
    R = [r for r in R if r[0] in ['car', 'bus']]
    out_img = []
    out_label_f = ""
    if len(R):
        Iorig = cv2.imread(img_path)
        WH = np.array(Iorig.shape[1::-1], dtype=float)
        Lcars = []
        for i, r in enumerate(R):
            cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label = Label(0, tl, br)
            Icar = crop_region(Iorig, label)
            Lcars.append(label)
            p_img = '%s/%s_%dcar.png' % (out_dir, bname, i)
            cv2.imwrite(p_img, Icar)
            out_img.append(p_img)
        out_label_f = '%s/%s_cars.txt' % (out_dir, bname)
        lwrite(out_label_f, Lcars)
    print '\t\t%d cars found, runtime: %.1fs' % (len(R), time.time() - st)
    return out_img, out_label_f
Example #7
def detect_video(video_loc, frames_to_skip, out_dir, threshold):

    # Start yolonet
    net, meta = init_net()

    nr = 0
    skip = frames_to_skip
    frames_loc = 'videoframes'
    files_with_cars = []

    if not os.path.exists(frames_loc):
        os.makedirs(frames_loc)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Read input video using cv2
    cap = cv2.VideoCapture(video_loc)

    while cap.isOpened():

        ret, frame = cap.read()

        # Stop if the video is over
        if not ret:
            break

        nr += 1

        if nr % skip != 0:
            continue

        # save the frame so darknet could detect it
        # could be skipped and feed image straight to network
        name = frames_loc + '/frame' + str(nr) + '.jpg'
        cv2.imwrite(name, frame)

        r = dn.detect(net, meta, bytes(name, "ascii"), thresh=threshold)

        # only save image if there is a car in frame
        cars = [x for x in r if x[0] == b'car']

        if len(cars) > 0:
            outname = out_dir + '/frame' + str(nr) + '.jpg'
            files_with_cars.append(outname)
            cv2.imwrite(outname, frame)

        print("Found {} car(s) from frame {}".format(str(len(cars)), str(nr)))
        
        for _, conf, coords in cars:
            print("\tConfidence {}".format(conf))

    print("Files with cars: {}".format(", ".join(files_with_cars)))

    cap.release()

    # Remove temporary files

    shutil.rmtree(frames_loc, ignore_errors=False)
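The comment in detect_video notes that the temporary .jpg round-trip could be skipped by feeding the frame straight to the network; the personCount example later on this page does exactly that via nparray_to_image. A sketch of that variant, assuming a darknet binding whose detect() accepts the converted frame:

import cv2

def detect_frames(video_path, net, meta, thresh=0.5, skip=10):
    # Sketch only: nparray_to_image and detect are assumed to come from the
    # same darknet binding used in the personCount example below.
    cap = cv2.VideoCapture(video_path)
    nr = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        nr += 1
        if nr % skip != 0:
            continue
        img = nparray_to_image(frame)  # no temporary .jpg on disk
        yield nr, detect(net, meta, img, thresh)
    cap.release()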
Example #8
def lp_ocr(input_dir):

    output_dir = input_dir

    try:
        ocr_threshold = .4

        ocr_weights = 'data/ocr/ocr-net.weights'
        ocr_netcfg = 'data/ocr/ocr-net.cfg'
        ocr_dataset = 'data/ocr/ocr-net.data'

        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)

        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

        print 'Performing OCR...'

        for i, img_path in enumerate(imgs_paths):

            print '\tScanning %s' % img_path

            bname = basename(splitext(img_path)[0])

            R, (width, height) = detect(ocr_net,
                                        ocr_meta,
                                        img_path,
                                        thresh=ocr_threshold,
                                        nms=None)

            if len(R):

                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)

                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])

                # print "AFTER NMS AND SORT"
                # for label in L:
                #     print label.letter(), label.prob(), label
                # print "=========================================================================="

                prob_str = ''.join([str(l.prob()) + ',' for l in L])

                with open('%s/%s_str.txt' % (output_dir, bname), 'w') as f:
                    f.write(lp_str + '\n')
                    f.write(prob_str + '\n')

                print '\t\tLP: %s' % lp_str

            else:

                print 'No characters found'

    except:
        traceback.print_exc()
        sys.exit(1)
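nms(L, .45) in these OCR examples is the project's non-maximum suppression over character boxes. For reference, a from-scratch greedy IoU-based NMS over (tl, br) corner boxes; a sketch, not the repo's implementation:

import numpy as np

def iou(a_tl, a_br, b_tl, b_br):
    # intersection-over-union of two axis-aligned boxes given as corners
    tl = np.maximum(a_tl, b_tl)
    br = np.minimum(a_br, b_br)
    inter = np.prod(np.maximum(br - tl, 0.))
    area_a = np.prod(a_br - a_tl)
    area_b = np.prod(b_br - b_tl)
    return inter / (area_a + area_b - inter)

def greedy_nms(boxes, scores, thresh=0.45):
    # keep highest-scoring boxes; drop any later box overlapping a kept one
    order = np.argsort(scores)[::-1]
    keep = []
    for i in order:
        if all(iou(*boxes[i], *boxes[j]) < thresh for j in keep):
            keep.append(i)
    return keep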
Example #9
    def on_any_event(event):
        if event.is_directory:
            return None

        elif event.event_type == 'created':
            # Take any action here when a file is first created.
            print("Received created event - %s." % event.src_path)
            try:
                print('\tScanning %s' % event.src_path)
                try:
                    img_path = event.src_path

                    print('\tScanning %s' % img_path)

                    bname = basename(splitext(img_path)[0])

                    R, _ = detect(vehicle_net,
                                  vehicle_meta,
                                  img_path.encode('utf-8'),
                                  thresh=vehicle_threshold)

                    R = [r for r in R if r[0] in ['car', 'bus']]

                    print('\t\t%d cars found' % len(R))

                    if len(R):

                        Iorig = cv2.imread(img_path)
                        WH = np.array(Iorig.shape[1::-1], dtype=float)
                        Lcars = []

                        for i, r in enumerate(R):
                            cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                                (WH, WH))).tolist()
                            tl = np.array([cx - w / 2., cy - h / 2.])
                            br = np.array([cx + w / 2., cy + h / 2.])
                            label = Label(0, tl, br)
                            Icar = crop_region(Iorig, label)

                            Lcars.append(label)

                            cv2.imwrite(
                                '%s/%s_%dcar.png' % (output_dir, bname, i),
                                Icar)

                except:
                    traceback.print_exc()
                    sys.exit(1)

            except:
                traceback.print_exc()
                sys.exit(1)
Example #10
def predict():
    start = time.time()
    with open("temp.jpg", 'wb') as image:
        image.write(request.stream.read())
    end = time.time()
    logger.info("Time used for writing image: {}ms".format(
        (end - start) * 1000))
    start = time.time()
    r = darknet.detect(net, meta, "temp.jpg".encode('utf-8'))
    end = time.time()
    logger.info("Time used for detect: {}ms, result: {}".format(
        (end - start) * 1000, str(r)))
    return str(r)
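A client-side sketch for exercising this endpoint; the URL and /predict route are assumptions, and the handler only expects raw image bytes in the request body:

import requests

with open('car.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/predict', data=f.read())
print(resp.text)  # stringified darknet detections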
Example #11
File: int.py, Project: itnoneedteach/alpr
def vehicle_detection(img_path, output_dir):
    try:
        if not isdir(output_dir):
            makedirs(output_dir)

        bname = basename(splitext(img_path)[0])
        plates = []

        # Vehicle detection
        R, _ = detect(vehicle_net,
                      vehicle_meta,
                      img_path,
                      thresh=vehicle_threshold)
        R = [r for r in R if r[0] in ['car', 'bus', 'motorbike']]

        # print '\t\t%d cars found' % len(R)
        if not len(R):
            return ("", plates)

        Iorig = cv2.imread(img_path)
        WH = np.array(Iorig.shape[1::-1], dtype=float)
        Lcars = []

        for i, r in enumerate(R):
            cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label = Label(0, tl, br)
            Icar = crop_region(Iorig, label)

            Lcars.append(label)
            carImagePath = '%s/%s_%dcar.png' % (output_dir, bname, i)
            cv2.imwrite(carImagePath, Icar)
            #print("CarImagePath: ", carImagePath)

            # LP detection
            LPImagePath = LPDection(carImagePath)
            if LPImagePath:
                lp_str = OCRDection(LPImagePath)
                if lp_str:
                    plates.append(lp_str)

        lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

        # draw yellow box around the cars and red box around license plates
        genOutput(img_path, output_dir, bname)
        return ('%s/%s_output.png' % (output_dir, bname), plates)

    except:
        traceback.print_exc()
        return ("", plates)
Example #12
File: detect.py, Project: hy-xiong/alpr_vid
def lp_ocr_one_img(img_path, ocr_dn_net, ocr_dn_meta, ocr_thd):
    print '\tScanning %s' % img_path,
    st = time.time()
    R, (width, height) = dn.detect(ocr_dn_net,
                                   ocr_dn_meta,
                                   img_path,
                                   thresh=ocr_thd,
                                   nms=None)
    lp_str = ""
    if len(R):
        L = dknet_label_conversion(R, width, height)
        L = nms(L, .45)
        L.sort(key=lambda x: x.tl()[0])
        lp_str = ''.join([chr(l.cl()) for l in L])
    print 'runtime: %.1f' % (time.time() - st)
    return lp_str
Example #13
    def extract_text_from_image(self, license_plate_image):
        with tempfile.NamedTemporaryFile(suffix=".png") as temporary_file_name:
            file_name = temporary_file_name.name
            cv2.imwrite(str(file_name), license_plate_image * 255.)
            R, (width, height) = dn.detect(self.ocr_net,
                                           self.ocr_meta,
                                           file_name,
                                           thresh=self.ocr_threshold,
                                           nms=None)
            if len(R):
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)

                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                return lp_str
            else:
                raise Exception("Failed to extract text")
Example #14
def evaluate():
    r = request
    thisNet = net
    thisMeta = meta
    thisImage = image
    # print("0")
    # r = request
    # print(r.data)
    # print("1")
    # nparr = np.fromstring(r.data, np.uint8)
    # print("2")
    # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # print("3")
    toRet = dn.detect(thisNet, thisMeta, thisImage)
    # print("4")
    # print(toRet)
    # print("5")
    return "Hi tim"
Example #15
def ocr_func(img, lp_point):
    ptsh = np.zeros((2, 4), dtype=np.float32)
    ptsh[:, 0] = lp_point[0:2]
    ptsh[:, 1] = lp_point[2:4]
    ptsh[:, 2] = lp_point[4:6]
    ptsh[:, 3] = lp_point[6:8]
    ptsh = np.concatenate((ptsh, np.ones((1, 4))))
    H = find_T_matrix(ptsh, t_ptsh)
    Ilp = cv2.warpPerspective(img, H, (240, 80), borderValue=.0)
    R, (width, height) = detect(ocr_net, ocr_meta, Ilp, thresh=.4, nms=None)
    if len(R):
        L = dknet_label_conversion(R, width, height)
        L = nms(L, .45)
        L.sort(key=lambda x: x.tl()[0])
        lp_str = ''.join([chr(l.cl()) for l in L])
        print('\t\tLP: %s' % lp_str)
        return lp_str
    else:
        print('No characters found')
        return ''
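find_T_matrix and t_ptsh are helpers from the surrounding project. With plain OpenCV the same rectification of four plate corners onto a 240x80 patch can be sketched as follows (the corner ordering of lp_point is assumed to match the destination points):

import cv2
import numpy as np

def rectify_plate(img, lp_point, out_size=(240, 80)):
    # lp_point: 8 floats, the four (x, y) plate corners in source-image
    # pixels, ordered top-left, top-right, bottom-right, bottom-left
    src = np.array(lp_point, dtype=np.float32).reshape(4, 2)
    w, h = out_size
    dst = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
    H = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, H, out_size)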
Example #16
    def detect_pic_list(self, list_path):
        for f_path in list_path:
            pic_name = f_path[0]
            image = skimage.io.imread(f_path[1])
            R, _ = detect(self.vehicle_net,
                          self.vehicle_meta,
                          f_path[1],
                          thresh=self.vehicle_threshold)
            R = [
                r for r in R if r[0] in [
                    'car', 'bus', 'work_van', 'non_motorized_vehicle',
                    'pickup_truck', 'articulated_truck', 'single_unit_truck'
                ]
            ]
            if len(R):
                list_b = []
                for r in R:
                    list_b.append(r[2])
                dict_a = {'image_name': pic_name, 'rois': list_b}
                self.list_out.append(dict_a)
Example #17
def fr_detect(img):
	print '\t\t\tdetecting front and rear using FRD..., Model:', FR_netcfg
	results, wh = dn.detect(FR_net, FR_meta, img, threshold)

	# results are ordered by probability, from highest to lowest
	if len(results):
		print '\t\t\tFR detection completed'
		FRs = []
		category = []
		for i, result in enumerate(results):
			WH = np.array(img.shape[1::-1], dtype=float)
			cx, cy, w, h = (np.array(result[2]) / np.concatenate((WH, WH))).tolist()
			tl = np.array([cx - w / 2., cy - h / 2.])
			br = np.array([cx + w / 2., cy + h / 2.])
			print '\t\t\tFR number', i, 'position:', tl, br, 'prob:', result[1]
			FRs.append(Label(tl=tl, br=br))
			category.append(result[0])
		return np.array(FRs), category

	else:
		print '\t\t\tFR detection failed'
		return np.array([]), []  # original fell through and returned None
Example #18
def do_fr(imgs_paths):

    modelname = basename(splitext(FR_netcfg)[0])
    weight = basename(splitext(FR_weights)[0])

    model_dir = join('output_txt/', modelname + '_' + valid_dataset)
    final_dir = join(model_dir, weight)

    if not isdir(model_dir):
        mkdir(model_dir)
    if not isdir(final_dir):
        mkdir(final_dir)
    for img_path in imgs_paths:
        print 'FRD processing on', img_path
        img = cv2.imread(img_path)
        frds, wh = dn.detect(FR_net, FR_meta, img, threshold)
        txt_file = open(
            join(final_dir,
                 basename(splitext(img_path)[0]) + '.txt'), 'w')
        if len(frds) == 0:
            txt_file.close()  # leave an empty txt file for images with no detections
            continue
        for frd in frds:
            cx, cy, w, h = (np.array(frd[2])).tolist()
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label_FR = Label(tl=tl, br=br)
            tl = label_FR.tl().astype(int)
            br = label_FR.br().astype(int)
            txt_file.write(frd[0] + ' ' + str('%.2f' % frd[1]) + ' ' +
                           str(tl[0]) + ' ' + str(tl[1]) + ' ' + str(br[0]) +
                           ' ' + str(br[1]) + '\n')
            print '\twrote result to', join(
                final_dir,
                basename(splitext(img_path)[0]) + '.txt')
        txt_file.close()
Example #19
    def detect_cars(self, image_origin, image_path):
        R, _ = dn.detect(self.vehicle_net,
                         self.vehicle_meta,
                         image_path,
                         thresh=self.vehicle_threshold)

        WH = np.array(image_origin.shape[1::-1], dtype=float)
        widths = []
        heights = []
        car_images = []
        car_labels = []

        for i, r in enumerate(R):
            cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
            widths.append(w), heights.append(h)
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label = Label(0, tl, br)
            car_image = crop_region(image_origin, label).astype("uint8")
            car_images.append(car_image)
            car_labels.append(label)

        if not car_images:
            return None, None  # no detections; original indexed into an empty list
        best = int(np.argmax(widths))
        return car_images[best], car_labels[best]
Example #20
        imgs_paths.sort()

        if not isdir(output_dir):
            makedirs(output_dir)

        print('Searching for vehicles using YOLO...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])
            img_path2 = img_path.encode('ascii')

            R, _ = detect(vehicle_net,
                          vehicle_meta,
                          img_path2,
                          thresh=vehicle_threshold)
            #print(R)

            R = [r for r in R if r[0] in [b'car', b'bus']]

            print('\t\t%d cars found' % len(R))

            if len(R):

                Iorig = cv2.imread(img_path)
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):
Example #21
        imgs_paths.sort()
        print(input_dir)

        if not isdir(output_dir):
            os.makedirs(output_dir)

        print('Searching for vehicles using YOLO...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])

            R, _ = detect(vehicle_net,
                          vehicle_meta,
                          bytes(img_path, encoding='utf-8'),
                          thresh=vehicle_threshold)

            R = [r for r in R if r[0] in ['car', 'bus']]

            print('\t\t%d cars found' % len(R))

            if len(R):

                Iorig = cv2.imread(img_path)
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):

                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
Example #22
        ocr_net = dn.load_net(ocr_netcfg.encode('utf-8'),
                              ocr_weights.encode('utf-8'), 0)
        ocr_meta = dn.load_meta(ocr_dataset.encode('utf-8'))

        imgs_paths = sorted(glob('%s/*.jpg' % output_dir))

        print('Performing OCR...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])

            R, (width, height) = detect(ocr_net,
                                        ocr_meta,
                                        img_path.encode('utf-8'),
                                        thresh=ocr_threshold,
                                        nms=None)

            if len(R):

                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)

                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])

                with open('%s/%s_str.txt' % (output_dir, bname), 'w') as f:
                    f.write(lp_str + '\n')

                print('\t\tLP: %s' % lp_str)
Example #23
        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)

        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

        print 'Performing OCR...'

        for i, img_path in enumerate(imgs_paths):

            print '\tScanning %s' % img_path

            bname = basename(splitext(img_path)[0])

            R, (width, height) = detect(ocr_net,
                                        ocr_meta,
                                        img_path,
                                        thresh=ocr_threshold,
                                        nms=None)

            if len(R):

                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)

                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])

                with open('%s/%s_str.txt' % (output_dir, bname), 'w') as f:
                    f.write(lp_str + '\n')

                print '\t\tLP: %s' % lp_str
Example #24
def personCount(root,
                in_file,
                output_dir,
                lineState='Horizontal',
                thresh=0.5,
                roiThick=7,
                offset=0):

    try:
        out_file = in_file.split('/')[-1].split('.')[0] + '_out'

        vehicle_threshold = thresh
        roi = roiThick

        vehicle_weights = 'object_detector/data/vehicle-detector/yolov2.weights'
        vehicle_netcfg = 'object_detector/data/vehicle-detector/yolov2.cfg'
        vehicle_dataset = 'object_detector/data/vehicle-detector/coco.data'

        vehicle_net = dn.load_net(vehicle_netcfg, vehicle_weights, 0)
        vehicle_meta = dn.load_meta(vehicle_dataset)

        writer = None
        cap = cv2.VideoCapture(in_file)
        cnt = 0
        car_count = 0

        while cap.isOpened():

            ret, frame = cap.read()

            if not ret:
                root.statusStrVar.set(
                    'Done...Video saved at {}'.format(output_dir + '/' +
                                                      out_file + '.mp4'))
                break

            WH = frame.shape[:2]
            img = nparray_to_image(frame)
            R, _ = detect(vehicle_net, vehicle_meta, img, vehicle_threshold)

            if lineState == "Horizontal":
                linel = (0, WH[0] - (WH[0] / 4) - offset)
                liner = (WH[1], WH[0] - (WH[0] / 4) - offset)
            elif lineState == "Vertical":
                lineu = (WH[1] - (WH[1] / 4) - offset, 0)
                lined = (WH[1] - (WH[1] / 4) - offset, WH[1])

            R = [r for r in R if r[0] in ['person']]

            print 'Processing frame {}'.format(cnt)
            root.statusStrVar.set('Processing frame {}..'.format(cnt))
            print '\t%d person found' % len(R)

            if len(R):
                WH = np.array(frame.shape[1::-1], dtype=float)

                for i, r in enumerate(R):
                    name = r[0]
                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
                    tl = (int((cx - w / 2.) * WH[0]), int(
                        (cy - h / 2.) * WH[1]))
                    br = (int((cx + w / 2.) * WH[0]), int(
                        (cy + h / 2.) * WH[1]))
                    print '\t\t{}th Coords : ({}, {})'.format(i, tl, br)

                    cv2.rectangle(frame, tl, br, (255, 0, 0),
                                  2)  #crop_region(Iorig,label)

                    if lineState == "Horizontal":
                        cv2.line(frame, linel, liner, (0, 0, 255), 3)

                        if (cy - h / 2) * WH[1] > liner[1] and (
                                cy - h / 2) * WH[1] < liner[1] + roi:
                            car_count += 1
                    elif lineState == "Vertical":
                        cv2.line(frame, lineu, lined, (0, 0, 255), 3)

                        if (cx - w / 2) * WH[0] > lineu[0] and (
                                cx - w / 2) * WH[0] < lineu[0] + roi:
                            car_count += 1

                    cv2.putText(frame, name, tl, cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                                (0, 0, 255), 3, cv2.LINE_AA)

                    cv2.putText(frame, 'Person crossed : ' + str(car_count),
                                (0, int(WH[1])), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 0, 0), 2, cv2.LINE_AA)
                print '\n'

            if writer is None:
                fourcc = cv2.VideoWriter_fourcc(*'DIVX')
                writer = cv2.VideoWriter(output_dir + '/' + out_file + '.mp4',
                                         fourcc, 30,
                                         (frame.shape[1], frame.shape[0]),
                                         True)

            writer.write(frame)
            del frame
            cnt += 1

    except:
        traceback.print_exc()
        sys.exit(1)

    writer.release()
    cap.release()
    sys.exit(0)
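The counting condition above fires on every frame in which a box edge sits inside a thin band just past the line, so one person lingering there can be counted repeatedly. The band test isolated for clarity (a sketch; the name is mine and debouncing across frames is left to the caller):

def in_counting_band(edge, line, band=7):
    # True while the leading box edge sits inside the band just past the
    # line; a tracker or per-object flag is needed to avoid double counting
    return line < edge < line + band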
Example #25
        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)

        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

        print('Performing OCR...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])

            R, (width, height) = detect(ocr_net,
                                        ocr_meta,
                                        bytes(img_path, encoding='utf-8'),
                                        thresh=ocr_threshold,
                                        nms=None)

            if len(R):

                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)

                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])

                with open('%s/%s_str.txt' % (output_dir, bname), 'w') as f:
                    f.write(lp_str + '\n')

                print('\t\tLP: %s' % lp_str)
Example #26
                #Time 1
                start_time = time.time()

                input_dir = 'frame.jpg'
                output_dir = 'tmp/output'

                if not isdir(output_dir):
                    makedirs(output_dir)

                print 'Searching for vehicles using YOLO...'

                print 'Scanning frame #%d' % n

                R, _ = detect(vehicle_net,
                              vehicle_meta,
                              input_dir,
                              thresh=vehicle_threshold)

                R = [r for r in R if r[0] in ['car', 'bus']]

                if len(R) == 0:
                    continue

                print '\t\t%d cars found' % len(R)

                if len(R):

                    Iorig = frame
                    WH = np.array(Iorig.shape[1::-1], dtype=float)
                    Lcars = []
Example #27
        if cap.isOpened():
            success = True
        else:
            success = False
            print("Open vedio file failed!")

        while success:
            success, frame = cap.read()
            frame_index += 1
            # process every 10th frame
            if frame_index % 10 != 1:
                continue

            R, _ = detect(vehicle_net,
                          vehicle_meta,
                          frame,
                          thresh=vehicle_threshold,
                          is_imgpath=False)
            #print(R)

            R = [r for r in R if r[0] in [b'car', b'bus']]

            print('\t\t%d cars found' % len(R))

            if len(R):
                Iorig = frame  # original frame image
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):
Example #28
        imgs_paths = image_files_from_folder(input_dir)
        imgs_paths.sort()

        if not isdir(output_dir):
            makedirs(output_dir)

        print('Searching for vehicles using YOLO...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])

            R, _ = detect(vehicle_net,
                          vehicle_meta,
                          img_path.encode('utf-8'),
                          thresh=vehicle_threshold)

            R = [r for r in R if r[0] in [b'car', b'bus']]

            print('\t\t%d cars found' % len(R))

            if len(R):

                Iorig = cv2.imread(img_path)
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):

                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
Example #29
	def on_any_event(event):
		if event.is_directory:
			return None

		elif event.event_type == 'created':
			# Take any action here when a file is first created.
			print("Received created event - %s." % event.src_path)
			try:
				print('\tScanning %s' % event.src_path)
				try:
					img_path = event.src_path

					print('\tScanning %s' % img_path)

					bname = basename(splitext(img_path)[0])

					R, _ = detect(vehicle_net, vehicle_meta, img_path.encode('utf-8'), thresh=vehicle_threshold)
					P, _ = detect(vehicle_net, vehicle_meta, img_path.encode('utf-8'), thresh=vehicle_threshold)  # same detector run twice; one call could feed both filters below
					R = [r for r in R if r[0] in ['car', 'bus']]
					P = [p for p in P if p[0] in ['person']]

					print('\t\t%d cars found' % len(R))
					print('\t\t%d persons found' % len(P))

					if len(R):

						Iorig = cv2.imread(img_path)
						WH = np.array(Iorig.shape[1::-1], dtype=float)
						Lcars = []

						for i, r in enumerate(R):
							cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
							tl = np.array([cx - w / 2., cy - h / 2.])
							br = np.array([cx + w / 2., cy + h / 2.])
							label = Label(0, tl, br)
							Icar = crop_region(Iorig, label)

							Lcars.append(label)

							cv2.imwrite('%s/%s_%dcar.png' % (output_dir, bname, i), Icar)

						# lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)
					
					if len(P):

						Iorig = cv2.imread(img_path)
						WH = np.array(Iorig.shape[1::-1], dtype=float)
						Lpeople = []

						for i, r in enumerate(P):
							cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
							print(cx, cy, w, h)
							tl = np.array([cx - w / 2., cy - h / 2.])
							tl = [tl[0] - .03, tl[1] - .03]
							if tl[0] < 0:
								tl[0] = 0.00
							if tl[1] < 0:
								tl[1] = 0.00
							br = np.array([cx + w / 2., cy + h / 2.])
							br = [br[0] + .03, br[1] + .03]
							if br[0] > 1:
								br[0] = 1.00
							if br[1] > 1:
								br[1] = 1.00
							label = Label(0, tl, br)
							Iperson = crop_region(Iorig, label)
							print(tl, br)
							Lpeople.append(label)
							height, width, _ = Iperson.shape
							print(height, width)
							if width > 320:
								cv2.imwrite('%s/%s_%dperson.png' % (output_dir_people, bname, i), Iperson)
							else:
								print('face likely too small to recognize')
								print('%s/%s_%dperson.png' % (output_dir_people, bname, i))

						# lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)


				except:
					traceback.print_exc()
					sys.exit(1)


			except:
				traceback.print_exc()
				sys.exit(1)


		elif event.event_type == 'modified':
			# Take any action here when a file is modified.
			print("Received modified event - %s." % event.src_path)
Example #30
	ocr_dataset = 'data/ocr/ocr-net.data'

	ocr_net  = dn.load_net(ocr_netcfg.encode("utf-8"), ocr_weights.encode("utf-8"), 0)
	ocr_meta = dn.load_meta(ocr_dataset.encode("utf-8"))

	imgs_paths = glob('%s/*lp.png' % output_dir)

	print ('Performing OCR...')

	for i,img_path in enumerate(imgs_paths):

		print ('\tScanning %s' % img_path)

		bname = basename(splitext(img_path)[0])

		R = detect(ocr_net, ocr_meta, img_path, thresh=ocr_threshold)

		if len(R):

			R.sort(key=lambda x: x[2][0])
			lp_str = ''.join(str(r[0],encoding="utf-8") for r in R)

			with open('%s/%s_str.txt' % (output_dir,bname),'w') as f:
				f.write(lp_str + '\n')

			print ('\t\tLP: %s' % lp_str)

		else:

			print ('No characters found')