# Example no. 1
# 0
def capture():
    """Capture one frame, detect the largest face and, if it is large
    enough, send it to the FaceID verification API.

    Returns the rendered 'capture.html' page for the captured frame.
    """
    camera = get_camera()
    stamp, img = camera.capture()

    # Downscaled + grayscale copy for the (faster) detector.
    scaled_image, gray = image_utils.img_preprocess(img)

    faces = image_utils.detect_face(gray)
    if len(faces) != 0:
        [x, y, w, h] = image_utils.biggest_face(faces)
        # Map detection coords back to the full-resolution image
        # (assumes img_preprocess scales by 0.3 -- TODO confirm).
        [x, y, w, h] = [int(x / 0.3), int(y / 0.3), int(w / 0.3), int(h / 0.3)]
        crop_img = image_utils.img_crop(img, x, y, w, h)
        img = image_utils.img_draw_rect(img, x, y, w, h)

        if w < 72:
            # Face too small for reliable verification.
            print('Please look closer')
        else:
            # Send the cropped face to the FaceID API as base64.
            base64_img = image_utils.img2base64(crop_img)
            if base64_img == '':
                print('Error when encode image')
            else:
                # Fixed: removed leftover `import pdb; pdb.set_trace()`
                # debugger breakpoint that froze the server on every
                # successful verification.
                res = api_volley.verify(base64_img)
                print(res)
    else:
        print('Please look closer')
    return render_template('capture.html', stamp=stamp, path=stamp_file(stamp))
def camshift_face_track():
    """Track a face from the default webcam using CAMShift.

    Blocks until a first face is found via the Haar cascade, then tracks it
    frame-to-frame with camshift_track() over an exponentially smoothed
    (running-average) image.  Keys: 'q' quits, 'r' resets detection.

    NOTE(review): the camera is never released and no destroyAllWindows()
    is called on exit -- confirm whether the caller cleans up.
    """
    face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
    # CAMShift termination criteria: stop after 10 iterations or when the
    # window moves less than 1 pixel.
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    # Weight of the newest frame in the running average (0..1).
    ALPHA = 0.5

    camera = cv2.VideoCapture(0)
    face_box = None  # becomes the detected face rect once found

    # Wait till first face box is available.
    print "Waiting to get first face frame..."
    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        # pyrDown halves the resolution to speed up detection/tracking.
        image_prev = cv2.pyrDown(frame)
        face_box = utils.detect_face(face_cascade, image_prev)

    print "Face found!"
    # float32 accumulator required by cv2.accumulateWeighted.
    prev_frames = image_prev.astype(np.float32)
    while (True):
        _, frame = camera.read()
        image_curr = cv2.pyrDown(frame)
        # Blend the new frame into the running average, then convert the
        # smoothed result back to 8-bit for tracking and display.
        cv2.accumulateWeighted(image_curr, prev_frames, ALPHA)
        image_curr = cv2.convertScaleAbs(prev_frames)
        if face_box is not None:
            # Track the existing box and draw it (blue) on the frame.
            face_box = camshift_track(image_curr, face_box, termination)
            cv2.rectangle(image_curr, (face_box[0], face_box[1]), (face_box[0]+face_box[2], face_box[1] + face_box[3]),
                          (255, 0,0), 2)
            # cv2.rectangle(image_curr, (box[0], box[1]), (box[0]+box[2], box[1] + box[3]),
            #               (0, 0,255), 2)

        else:
            # Lost the face (or user reset) -- fall back to Haar detection.
            face_box = utils.detect_face(face_cascade, image_curr)

        cv2.imshow("Output", image_curr)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break

        elif key & 0xFF == ord('r'):
            print "Reseting face detection!"
            face_box = None
p0 = []
prev_frame = None

while (True):
    grabbed, frame = camera.read()
    if not grabbed:
        print "Camera read failed!"
        break

    frame = utils.image_resize(frame)
    curr_frame_gray = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    info = ""

    if prev_frame is None or len(p0) <= 3:
        info = "Detecting..."
        face = utils.detect_face(face_cascade, frame)
        if face is not None:
            prev_frame = curr_frame_gray
            x,y,w,h = face
            roi = np.zeros(prev_frame.shape, dtype=np.uint8)
            roi[y:y+h, x:x+w] = 255
            p0 = cv2.goodFeaturesToTrack(prev_frame, mask=roi, **feature_params)

    else:
        p1,st,err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame_gray, p0, None, **lk_params)
        # Update points being tracked to new good set
        p0 = p1[st==1].reshape(-1,1,2)
        info = "Tracking: %d" % len(p0)
        for pt in p1:
            a,b = pt.ravel()
            cv2.circle(frame, (a,b), 3, (0,255,0), -1)
    # ap.add_argument("-r", "--right", required=True, help="Path to right image")
    # args = vars(ap.parse_args())

    # imgL = cv2.pyrDown(cv2.imread(args["left"]))  # downscale images for faster processing
    # imgR = cv2.pyrDown(cv2.imread(args["right"]))
    # stereo_match(imgL, imgR)
    camera = cv2.VideoCapture(0)
    accumulated_verts = None
    face_box = None

    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        img = frame
        face_box = utils.detect_face(face_cascade, img)

    img_ref = get_resized_image(img, face_box)
    print img_ref.shape

    disparity = np.zeros((img_ref.shape[0], img_ref.shape[1]), dtype=np.uint8)

    while (True):
        _, frame = camera.read()
        img = frame
        face_box = utils.detect_face(face_cascade, img)

        if face_box is not None:
            img_curr = get_resized_image(img, face_box)
            disparity |= stereo_match(img_ref, img_curr)  # cv2.bitwise_and(disparity, stereo_match(imgRef, imgR))
    # ap.add_argument("-r", "--right", required=True, help="Path to right image")
    # args = vars(ap.parse_args())

    # imgL = cv2.pyrDown(cv2.imread(args["left"]))  # downscale images for faster processing
    # imgR = cv2.pyrDown(cv2.imread(args["right"]))
    # stereo_match(imgL, imgR)
    camera = cv2.VideoCapture(0)
    accumulated_verts = None
    face_box = None

    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        img = frame
        face_box = utils.detect_face(face_cascade, img)

    img_ref = get_resized_image(img, face_box)
    print img_ref.shape

    disparity = np.zeros((img_ref.shape[0], img_ref.shape[1]), dtype=np.uint8)

    while (True):
        _, frame = camera.read()
        img = frame
        face_box = utils.detect_face(face_cascade, img)

        if face_box is not None:
            img_curr = get_resized_image(img, face_box)
            disparity |= stereo_match(
                img_ref, img_curr
    return p


# --- Face-distance calibration script (loop continues past this view) ---
camera = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier(
    'Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
calibrate = True  # start in calibration mode

# Presumably maps a calibration label (closest/farthest/neutral) to the
# face rect captured at that distance -- confirm against the key handler
# that follows this view.
calibration_rects = {}

while True:
    face_box = None
    # NOTE(review): `grabbed` is not checked here, unlike the loops above.
    grabbed, frame = camera.read()
    frame = utils.image_resize(frame, height=600)
    face_box = utils.detect_face(face_cascade, frame)

    if face_box is None:
        # No face in this frame; grab the next one.
        continue

    # Draw the detected face rectangle; face_box is (x, y, w, h).
    cv2.rectangle(frame, (face_box[0], face_box[1]),
                  (face_box[0] + face_box[2], face_box[1] + face_box[3]),
                  (255, 0, 0), 2)

    if calibrate:
        utils.add_text(
            frame, "Press: W - closest, S - farthest, C - neutral, Q - Done")
        # z_axis_length is defined elsewhere in the file -- TODO confirm.
        no_points_either_side = z_axis_length / 2
        cv2.imshow("Calibrating...", frame)
        key = cv2.waitKey(1) & 0xFF