Example #1
def api(P1, P2, P3):
    errmsg = ""
    detected_person = []
    opt_person = []
    start_ts = time.time()

    try:
        # load the known-face database; get_face_db_from_redis(P1, P2) is an
        # alternative source
        face_names, face_vectors, face_id_lists = get_face_db_from_json(P1)
        if os.path.isfile(P2):
            test_face_imgs, test_locations, test_encodings = get_face_vector(
                P2)
            if len(test_encodings) > 0:
                detected_person, _ = get_face_profile(face_names, face_vectors,
                                                      face_id_lists,
                                                      test_face_imgs,
                                                      test_locations,
                                                      test_encodings)
                img = mark_face(P2, test_locations, detected_person)
                pu.SaveImg(img, P3)
                pu.ShowImgIfWinOS(img)
        else:
            errmsg = "image not exist"
    except Exception as e:
        errmsg = str(e)

    # keep only recognized names; '?' marks an unrecognized face
    for s in detected_person:
        if s != '?': opt_person.append(s)

    log.Json_log(l, "error_msg", errmsg)
    log.Json_log(l, "detected", opt_person)
    log.Json_log(l, "process_time", round(time.time() - start_ts, 3))
    log.Json_print(l)
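
A minimal invocation sketch for this example, assuming it lives in the project module that defines the `log`/`pu` helpers, the logger handle `l`, and the `get_face_db_from_json` / `get_face_vector` / `get_face_profile` / `mark_face` functions used above; the file paths below are hypothetical.

# Hypothetical usage: P1 = face database (JSON), P2 = input image,
# P3 = path for the annotated output image
api("face_db.json", "input/group_photo.jpg", "output/group_photo_marked.jpg")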
Example #2
def api(P1, P2, P3):
    imgs = []
    totalimgs = []
    errmsg = ""
    start_ts = time.time()
    have_face = False
    best_score = 0
    det_time = start_ts
    frame_count = 0
    fpsOpencvDnn = 0
    try:
        cap = cv2.VideoCapture(P1)
        hasFrame, frame = cap.read()

        tt_opencvDnn = 0
        while frame_count < int(P3):
            hasFrame, frame = cap.read()
            if not hasFrame: break
            frame_count += 1
            if frame_count % frame_step != 0: continue

            t = time.time()
            # DNN-based body + face detection on the full frame
            # (pd.CvDetBodyFaces / pd.DeepDetBodyFaces / pd.CvDetFace are
            # Haar-based alternatives)
            sframe, imgs, bboxes = DetBodyFaces(frame, frame)

            tt_opencvDnn += time.time() - t
            fpsOpencvDnn = frame_count / tt_opencvDnn
            label = "FPS : {:.2f}".format(fpsOpencvDnn)  # FPS label (not drawn in this variant)
            pu.ShowCVVideoIfWinOS(sframe)
            for img in imgs:
                totalimgs.append(img)
            # the first processed frame includes model warm-up, so reset the timer
            if frame_count == 1: tt_opencvDnn = 0

        cap.release()
        det_time = time.time()
        # choose the best face crop among all collected images
        have_face, best_img, best_score = pd.DlibGetBestFace(totalimgs,
                                                             score=face_score,
                                                             debug=False)
        if have_face:
            pu.ShowImgIfWinOS(best_img)
            nimg = ClipBestFace(best_img)
            pu.ShowImgIfWinOS(nimg)
            pu.SaveImg(nimg, P2)
    except Exception as e:
        errmsg = str(e)

    log.Json_log(l, "p1", P1)
    log.Json_log(l, "p2", P2)
    log.Json_log(l, "p3", P3)
    log.Json_log(l, "fps", fpsOpencvDnn)
    log.Json_log(l, "processed_frames", frame_count)
    log.Json_log(l, "face_detected", have_face)
    log.Json_log(l, "face_score", best_score)
    log.Json_log(l, "error_msg", errmsg)
    log.Json_log(l, "process_time_detface", round(det_time - start_ts, 3))
    log.Json_log(l, "process_time_bestface", round(time.time() - det_time, 3))
    log.Json_print(l)
    cv2.destroyAllWindows()
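
A minimal invocation sketch, assuming the module-level settings `frame_step` and `face_score`, the `pd`/`pu`/`log` helpers, the logger handle `l`, and `DetBodyFaces`/`ClipBestFace` are defined as in the surrounding project; the video path and frame limit below are hypothetical.

# Hypothetical usage: P1 = video file (or camera index), P2 = output path for
# the best face crop, P3 = maximum number of frames to scan
api("videos/entrance.mp4", "output/best_face.jpg", "300")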
Example #3
def api(P1, P2, P3):
    imgs = []
    totalimgs = []
    errmsg = ""
    start_ts = time.time()
    have_face = False
    best_score = 0
    det_time = start_ts
    fpsOpencvDnn = 0
    frame_count = 0
    first_frame = []
    background_frame = []

    try:
        cap = cv2.VideoCapture(P1)
        hasFrame, frame = cap.read()
        tt_opencvDnn = 0
        start_ts = time.time()
        while frame_count < int(P3):
            tt = time.time()
            hasFrame, frame = cap.read()
            pc.TIME_WAIT_FRAME += (time.time() - tt)
            if not hasFrame: break
            if len(first_frame) == 0: first_frame = frame
            # (disabled) refresh background_frame from first_frame every 30 frames
            frame_count += 1
            if frame_count % frame_step != 0: continue

            t = time.time()
            # ------- background subtraction and motion detection -------
            cut_frame = cv2.resize(frame, (sw, sh),
                                   interpolation=cv2.INTER_AREA)
            gframe, cframe = pd.FilterFrame(frame, first_frame, pw, ph)
            cframe, motion_rect = pd.FindMotionRect(cut_frame, cframe,
                                                    motion_size)
            # ------------------------------------------------------------
            motion_bboxes = []
            motion_imgs = []
            for m in motion_rect:
                # Haar-cascade body + face detection inside each motion region
                # (pd.CVDnnDetBodyFaces / pd.DeepDetBodyFaces are alternatives)
                imgs, bboxes = pd.CvDetBodyFaces(
                    m, m, CVminNeighbors=1, minsize=25, maxsize=200)
                motion_bboxes = motion_bboxes + bboxes
                motion_imgs = motion_imgs + imgs

            tt_opencvDnn += time.time() - t
            fpsOpencvDnn = frame_count / tt_opencvDnn
            label = "FPS : {:.2f}".format(fpsOpencvDnn)
            cv2.putText(cframe, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                        (0, 0, 255), 2, cv2.LINE_AA)
            pu.ShowCVVideoIfWinOS(cframe)
            for img in motion_imgs:
                totalimgs.append(img)
            # the first processed frame includes model warm-up, so reset the timer
            if frame_count == 1: tt_opencvDnn = 0

        cap.release()
        det_time = time.time()
        # choose the best face crop among all collected images
        have_face, best_img, best_score = pd.DlibGetBestFace(totalimgs,
                                                             score=face_score,
                                                             debug=False)
        if have_face:
            pu.ShowImgIfWinOS(best_img)
            nimg = ClipBestFace(best_img)
            pu.ShowImgIfWinOS(nimg)
            pu.SaveImg(nimg, P2)
    except Exception as e:
        errmsg = str(e)

    log.Json_log(l, "p1", P1)
    log.Json_log(l, "p2", P2)
    log.Json_log(l, "p3", P3)
    log.Json_log(l, "fps", fpsOpencvDnn)
    log.Json_log(l, "processed_frames", frame_count)
    log.Json_log(l, "face_detected", have_face)
    log.Json_log(l, "face_score", best_score)
    log.Json_log(l, "error_msg", errmsg)

    log.Json_log(l, "0.process_time_wait_frame", round(pc.TIME_WAIT_FRAME, 3))
    log.Json_log(l, "1.process_time_filter_frame",
                 round(pc.TIME_FILTER_FRAME, 3))
    log.Json_log(l, "2.process_time_motion_block",
                 round(pc.TIME_MOTION_DET, 3))
    log.Json_log(l, "3.process_time_detface", round(pc.TIME_FACE_DET, 3))
    log.Json_log(l, "4.process_time_bestface", round(time.time() - det_time,
                                                     3))
    log.Json_log(l, "5.process_time_ui_shown", round(pc.TIME_UI_SHOWN, 3))
    log.Json_log(l, "9.process_time_total", round(time.time() - start_ts, 3))
    log.Json_print(l)
    cv2.destroyAllWindows()
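
The same kind of invocation sketch applies here, with the extra assumption that the background-subtraction settings (`sw`, `sh`, `pw`, `ph`, `motion_size`) and the `pc` timing counters are defined at module level as in the project; the values below are hypothetical.

# Hypothetical usage: identical parameters to Example #2, but frames are
# background-subtracted first and only motion regions are searched for faces
api("videos/entrance.mp4", "output/best_face.jpg", "600")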