def main():
    # Instantiate Classes
    detector = FaceDetector(FACE_CLASSIFIER_PATH, EYE_CLASSIFIER_PATH)
    model = FaceModel()
    display = Display()
    capture = Capture()

    oldTime = time.time()
    i = 0
    subprocess.call(['speech-dispatcher'])

    while True:
        # Calculate time difference (dt), update oldTime variable
        newTime = time.time()
        dt = newTime - oldTime
        oldTime = newTime

        # Grab Frames
        frames = capture.read()

        # Run a full face detection every 10th frame (eyes are detected every
        # frame); otherwise search near the previously found face rectangles
        if i % 10 == 0:
            rects = detector.detect(frames)
        else:
            rects = detector.detect(frames, model.getPreviousFaceRects())
        i += 1

        # Add detected rectangles to model
        model.add(rects)

        # Render
        display.renderScene(frames['display'], model, rects)
        display.renderEyes(frames['color'], model)
Code example #2
def main(rootDir):
    detector = FaceDetector()
    # Only the first level of rootDir holds subject folders, so take the
    # directory list from a single os.walk() step; recursing and re-joining
    # against rootDir would build wrong paths for deeper levels
    for subjectDir in next(os.walk(rootDir))[1]:
        print('Walking subject folder at %s' % subjectDir)
        subjectPath = os.path.join(rootDir, subjectDir)
        # Each session
        for sessionDir in next(os.walk(subjectPath))[1]:
            print('Walking session folder at %s' % sessionDir)
            sessionPath = os.path.join(subjectPath, sessionDir)
            # Detect the face once on the first image of the session
            img = cv2.imread(os.path.join(sessionPath, 'im0.bmp'), 0)
            face_coords = detector.detect(img)
            for i in range(4):
                imgFilePath = os.path.join(sessionPath, 'im%s.bmp' % i)
                imgCroppedFilePath = os.path.join(sessionPath,
                                                  'im%s_cropped.bmp' % i)
                if os.path.isfile(imgFilePath):
                    print('Cropping the image %s' % imgFilePath)
                    img = cv2.imread(imgFilePath)
                    if face_coords is None:
                        print('Did not find a face, just using the normal image')
                        cv2.imwrite(imgCroppedFilePath, img)
                    else:
                        face = detector.crop_face(img, face_coords)
                        cv2.imwrite(imgCroppedFilePath, face)
Code example #3
def make_prediction():
    themodels = train_models()
    #rec_eig = themodels[0]
    #fish = themodels[1]
    lbph = themodels[2]

    labels_dic = themodels[3]

    webcam = VideoCamera()
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    frame = webcam.get_frame()
    faces_coord = detector.detect(frame)
    faces = normalize_faces(frame, faces_coord)
    face = faces[0]

    plt.imshow(face)
    plt.show()
    del webcam

    #prediction, confidence = rec_eig.predict(face)
    #print ('Eigen faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    #prediction, confidence = fish.predict(face)
    #print ('Fisher Faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    prediction, confidence = lbph.predict(face)
    print('LBPH -> prediction: ' + labels_dic.get(prediction).capitalize() +
          " Confidence: " + str(round(confidence)))
Code example #4
    def run(self):
        print("Starting Detection")
        self.stopped = False
        fd = FaceDetector(0.5)
        detected = False
        # lc = LaserController(640,480)

        while not self.stopped:

            if len(self.frame) > 0:

                frame = fd.detect(self.frame)
                self.coordinates.emit((fd.startX, fd.startY))
                # lc.sendCoords(fd.startX, fd.startY, fd.endX, fd.endY)
                if fd.detected:
                    if not detected:
                        self.play.emit(1)
                    detected = True
                else:
                    if detected:
                        self.play.emit(2)
                    detected = False

                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb_image.shape
                bytesPerLine = ch * w
                convertToQtFormat = QtGui.QImage(rgb_image.data, w, h,
                                                 bytesPerLine,
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.image.emit(p)
        self.image.emit(QImage("resources/bg3.jpg"))
        self.finished.emit()
Code example #5
def collectImages():
    folder = "people/" + raw_input('Person: ').lower()
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)

    if not os.path.exists(folder):
        detector = FaceDetector('haarcascade_frontalface_default.xml')
        vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
        time.sleep(2.0)
        os.makedirs(folder)
        counter = 0
        timer = 0
        while counter < 13:
            frame = vs.read()
            #frame = imutils.resize(frame, width=600)
            faces_coord = detector.detect(frame)
            if len(faces_coord) and timer % 700 == 50:
                faces = normalize_faces(frame, faces_coord)
                cv2.imwrite(folder + '/' + str(counter) + '.jpg', faces[0])
                print("Images saved: " + str(counter))
                counter += 1
            draw_rectangle(frame, faces_coord)
            cv2.imshow('Frame', frame)
            key = cv2.waitKey(1) & 0xFF
            timer += 50

            if key == ord("q"):
                break

        cv2.destroyAllWindows()
        vs.stop()
        runMenu()
    else:
        print("Name already taken")
        runMenu()
Code example #6
    def long_running(self):
        print("Starting")
        self.stopped = False
        self.videoStream = cv2.VideoCapture(0)
        fd = FaceDetector(0.5)
        cl = Calibration()
        detected = False

        while not self.stopped:
            ret, frame = self.videoStream.read()
            if ret:
                self.pic.emit(frame)
                frame = fd.detect(frame)
                self.coordinates.emit((fd.startX, fd.startY, fd.endX, fd.endY))
                # lc.sendCoords(fd.startX, fd.startY, fd.endX, fd.endY)
                if fd.detected:
                    if not detected:
                        self.play.emit(1)
                    detected = True
                else:
                    if detected:
                        self.play.emit(2)
                    detected = False

                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb_image.shape
                bytesPerLine = ch * w
                convertToQtFormat = QtGui.QImage(rgb_image.data, w, h,
                                                 bytesPerLine,
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.image.emit(p)
        self.image.emit(QImage("resources/bg3.jpg"))
        self.finished.emit()
Code example #7
def make_prediction():
    themodels = train_models()
    #rec_eig = themodels[0]
    #fish = themodels[1]
    lbph = themodels[2]

    labels_dic = themodels[3]

    vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    faces_coord = detector.detect(frame)
    faces = normalize_faces(frame, faces_coord)
    face = faces[0]

    #prediction, confidence = rec_eig.predict(face)
    #print ('Eigen faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    #prediction, confidence = fish.predict(face)
    #print ('Fisher Faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    prediction, confidence = lbph.predict(face)
    print('LBPH -> prediction: ' + labels_dic.get(prediction).capitalize() +
          " Confidence: " + str(round(confidence)))
Code example #8
def build_data_set(url=0, win_name="live!"):
    # Face detection using Haar cascades:
    # Goal: demonstrate the basics of face detection with Haar feature-based
    #       cascade classifiers.
    # Basics: a machine-learning approach in which a cascade function is
    #         trained on many positive and negative images and then used to
    #         detect objects in other images. Each feature is a single value
    #         obtained by subtracting the sum of pixels under the white
    #         rectangle from the sum of pixels under the black rectangle.
    # OpenCV already ships many pre-trained classifiers for faces, eyes,
    # smiles, etc.; the XML files live in the opencv/data/haarcascades/ folder.

    video = VideoCamera(url)
    detector = FaceDetector('haarcascade_frontalface_default.xml')
    cv2.namedWindow(win_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(win_name, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    name = ""
    number_of_pic = 10

    print("Enter Your name")
    while True:
        frame = video.get_frame()
        height, width, _ = frame.shape

        cv2.imshow(win_name, frame)

        key = cv2.waitKey(40) & 0xFF
        if key not in [8, 13, 27, 255]:  # printable key: append to the name
            name += chr(key)
            print(name)
        elif key == 8:  # Backspace: drop the last character
            name = name[:-1]
        elif key == 27:  # Esc: abort
            cv2.destroyAllWindows()
            return
        elif key == 13:  # Enter: confirm the name
            folder = "people/" + name.lower()
            break

    if not os.path.exists(folder):
        os.mkdir(folder)
    init_pic = len(os.listdir(folder))
    counter = init_pic
    timer = 0
    while counter < number_of_pic + init_pic:  # take 10 photos
        frame = video.get_frame()
        faces_coordinates = detector.detect(frame)
        if len(faces_coordinates) and timer % 700 == 50:
            faces = normalize_faces(frame, faces_coordinates)
            cv2.imwrite(folder + '/' + str(counter) + '.jpg', faces[0])
            counter += 1
        cv2.imshow(win_name, frame)
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        timer += 50
    cv2.destroyAllWindows()
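
For context, a minimal standalone sketch of the Haar-cascade pipeline the comments at the top of build_data_set describe, assuming only stock OpenCV (the test image path is illustrative; haarcascade_frontalface_default.xml ships with OpenCV under cv2.data.haarcascades):

import cv2

# Load the pre-trained frontal-face cascade bundled with OpenCV
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)

img = cv2.imread('face.jpg')                  # illustrative test image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cascades run on grayscale

# Slide the cascade over the image at multiple scales; every window that
# passes all stages is reported as a face
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1,
                                      minNeighbors=5, minSize=(30, 30))
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imshow('faces', img)
cv2.waitKey(0)
cv2.destroyAllWindows()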
Code example #9
def main(leftpath, rightpath):

    imgL = cv2.imread(leftpath)
    imgR = cv2.imread(rightpath)

    imgL = cv2.pyrDown(imgL)
    imgR = cv2.pyrDown(imgR)

    detector = FaceDetector()
    lbp = LBP()

    face_coords = detector.detect(imgL)
    if face_coords is None:
        print('No face detected in the left image')
        return
    x, y, w, h = face_coords
    # Pad the detected face region generously (values are in pixels)
    x -= 400
    y -= 400
    w += 800
    h += 800

    # imgL = detector.crop_face(imgL, (x, y, w, h))
    # imgR = detector.crop_face(imgR, (x, y, w, h))

    # cv2.imwrite('left.png', imgL)
    # cv2.imwrite('right.png', imgR)

    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp

    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=5,
                                   P1=8 * 3 * window_size**2,
                                   P2=32 * 3 * window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)

    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    h, w = imgL.shape[:2]
    f = 0.8 * w  # guess for focal length
    Q = np.float32([
        [1, 0, 0, -0.5 * w],
        [0, -1, 0, 0.5 * h],  # turn points 180 deg around x-axis,
        [0, 0, 0, -f],  # so that y-axis looks up
        [0, 0, 1, 0]
    ])

    points = cv2.reprojectImageTo3D(disp, Q)

    out_points = points  # [mask]
    out_points = out_points[:, :, 2]
    print(out_points)
    hist, bins = lbp.run(out_points, False)
    print('hist', hist)
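
For reference, cv2.reprojectImageTo3D applies Q as a homogeneous transform to each (x, y, disparity) pixel. A hedged NumPy sketch of that per-pixel computation (illustrative only, not the OpenCV implementation):

import numpy as np

def reproject_pixel(Q, x, y, d):
    # Homogeneous mapping: [X, Y, Z, W] = Q @ [x, y, d, 1], then divide by W
    X, Y, Z, W = Q @ np.array([x, y, d, 1.0], dtype=np.float64)
    return np.array([X / W, Y / W, Z / W])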
Code example #10
def live_recognition():
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    webcam = VideoCamera(0)
    cv2.namedWindow('Testing', cv2.WINDOW_AUTOSIZE)
    models = train_models()

    lbph = models[2]
    labels_dic = models[3]

    while True:
        frame = webcam.get_frame()
        faces_coord = detector.detect(frame, True)  # detect more than one face

        if len(faces_coord) > 0:
            faces = normalize_faces(frame, faces_coord)  # normalize
            for i, face in enumerate(faces):  # for each detected face
                pred, conf = lbph.predict(face)
                threshold = 45
                print("Prediction: " + labels_dic[pred].capitalize() +
                      '\nConfidence: ' + str(round(conf)))
                clear_output(wait=True)

                if conf < threshold:
                    cv2.putText(frame, labels_dic[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 10),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2,
                                cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2)

            draw_rectangle(frame, faces_coord)
        else:
            cv2.putText(frame, "ESC to exit", (5, frame.shape[0] - 5),
                        cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2,
                        cv2.LINE_AA)

        # Show the frame and poll for Esc once per iteration
        cv2.imshow('Testing', frame)
        if cv2.waitKey(40) & 0xFF == 27:
            cv2.destroyAllWindows()
            break
Code example #11
def live_recognition():
    models = train_models()
    lbph = models[2]
    labels_dic = models[3]
    threshold = 100
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    time.sleep(2.0)

    while True:

        frame = vs.read()
        #frame = imutils.resize(frame, width=600)
        faces_coord = detector.detect(frame, True)

        if len(faces_coord) > 0:
            faces = normalize_faces(frame, faces_coord)
            for i, face in enumerate(faces):
                pred, conf = lbph.predict(face)
                print("Prediction: " + labels_dic[pred].capitalize() +
                      '\nConfidence: ' + str(round(conf)))

                if conf < threshold:
                    cv2.putText(frame, labels_dic[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 10),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2,
                                cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2)

            draw_rectangle(frame, faces_coord)

        # Show the frame and poll the keyboard once per loop iteration, not
        # once per face, so "q" reliably exits the while loop
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()
    runMenu()
Code example #12
def collectImages():
    folder = "people/" + raw_input('Person: ').lower()
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)
    cap = VideoCamera()
    detector = FaceDetector('haarcascade_frontalface_default.xml')

    if not os.path.exists(folder):
        os.makedirs(folder)
        counter = 0
        timer = 0

        while counter < 10:
            try:
                frame = cap.get_frame()
                bgra = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)

                faces_coord = detector.detect(bgra)
                if len(faces_coord) and timer % 700 == 50:
                    faces = normalize_faces(frame, faces_coord)
                    cv2.imwrite(folder + "/" + str(counter) + ".jpg", faces[0])
                    print("Images saved: " + str(counter))
                    counter += 1

                draw_rectangle(frame, faces_coord)
                cv2.imshow('Frame', frame)
                cv2.waitKey(50)
                timer += 50
            except KeyboardInterrupt:
                print("Live video interrupted")
                break
    else:
        print("This name is already taken")

    # Single cleanup path for both branches
    del cap
    cv2.destroyAllWindows()
Code example #13
class Enroller:
    def __init__(self):
        self.init()

    def init(self):
        self.faceDetector = FaceDetector()
        self.faceEncoder = FaceEncoder()
        self.camera = Camera()

        self.faceImageArray = None
        self.faceEncoding = None

    def setFace(self):
        # Start recording
        self.camera.start()
        face_locations = []
        startTime = time.time()
        maxWaitInSeconds = 5
        while len(face_locations) == 0:
            waitTime = time.time() - startTime
            if waitTime > maxWaitInSeconds:
                self.camera.stop()
                return False
            frame = self.camera.takePicture()
            # Detect faces
            face_locations = self.faceDetector.detect(frame,
                                                      resize=True,
                                                      resizeProportion=1)
            if len(face_locations) > 0:
                face_encodings = self.faceEncoder.encode(frame, face_locations)
        self.camera.stop()

        self.faceImageArray = frame
        self.faceEncoding = face_encodings[0]
        return True

    def enroll(self, dniDataString):
        person = Person(dniDataString, self.faceImageArray, self.faceEncoding)
        person.save()
Code example #14
def face_recognition(url=0, win_name="live!"):
    rec_eig, labels_dic = train_models()
    print("Finished training")

    video = VideoCamera(url)
    frame = video.get_frame()

    detector = FaceDetector('haarcascade_frontalface_default.xml')
    cv2.namedWindow(win_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(win_name, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    threshold = 0.5

    while True:
        frame = video.get_frame()
        faces_coordinates = detector.detect(frame)
        if len(faces_coordinates):
            faces = normalize_faces(frame, faces_coordinates)
            for i, face in enumerate(faces):
                pred, conf = rec_eig.predict(faces[i])
                if conf > threshold:
                    print("Prediction: {}, Confidence: {}.".format(
                        labels_dic[pred].capitalize(), conf))
                    cv2.putText(frame, labels_dic[pred].capitalize(),
                                (100, 100), cv2.FONT_HERSHEY_PLAIN, 3,
                                (76, 63, 243), 2)
                else:
                    print("Low Prediction: {}, Confidence: {}.".format(
                        labels_dic[pred].capitalize(), conf))
        cv2.imshow(win_name, frame)

        if cv2.waitKey(1) & 0xff == 27:
            break

    del video
    cv2.destroyAllWindows()
Code example #15
File: main.py Project: bkneis/2DFaceRecognizer
def main(args):

    # Create algorithm objects
    lbp = LBP()
    detector = FaceDetector()
    svm = SVM()
    knn = KNearest()

    # Get subjects to train the svm on
    imgs = [
        '/home/arthur/Downloads/lfw_funneled/Gian_Marco/Gian_Marco_0001.jpg',
        '/home/arthur/Downloads/lfw_funneled/Micky_Ward/Micky_Ward_0001.jpg',
        '/home/arthur/Downloads/lfw_funneled/Ziwang_Xu/Ziwang_Xu_0001.jpg',
        '/home/arthur/Downloads/lfw_funneled/Zhu_Rongji/Zhu_Rongji_0001.jpg'
    ]

    # Load the subjects and extract their features
    hists, labels = load_subjects(imgs, detector, lbp)

    # Transform to np arrays
    samples = np.array(hists, dtype=np.float32)
    labels = np.array(labels, dtype=np.int32)

    # Train classifiers
    svm.train(samples, labels)
    knn.train(samples, labels)

    # Check which mode the app is running in (image vs. video)
    if args.image is not None:
        # Read the image from the file path provided
        img = cv2.imread(args.image, 0)
        # Check the image exists
        if img is not None:
            # Run face recognition algorithm
            classify_snapshot(img, detector, lbp, knn)
        else:
            print('The image could not be found...')
        return

    # Establish connection to camera
    cap = cv2.VideoCapture(0)

    # Continuously grab the next frame from the camera
    while cap.isOpened():
        # Capture frame-by-frame; stop if the camera returns no frame
        ret, frame = cap.read()
        if not ret:
            break

        # Start timer for performance logging
        start = time.time()

        # Convert frame to gray scale for face detector
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect a face in the frame and crop the image
        face_coords = detector.detect(gray)
        face = detector.crop_face(gray, face_coords)

        # Check we have detected a face
        if face is not None:
            # Apply LBP operator to get feature descriptor
            hist, bins = lbp.run(face, False)

            # Convert the LBP descriptor to numpy array for opencv classifiers
            test_sample = np.array([hist], dtype=np.float32)

            # Get the class of id of the closest neighbour and its distance
            dist, class_id = knn.predict(test_sample)

            # Draw the face if found
            util.draw_face(dist, class_id, frame, face_coords)
            # util.segment_face(frame)

        # Processing finished
        end = time.time()

        # Write the fps to the video
        util.write_fps(start, end, frame)

        # Display the resulting frame
        cv2.imshow('frame', frame)

        # Check if we should stop the application
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Code example #16
File: test.py Project: jareturing/FaceDetect
def draw(img, boxes, scores, landms):
    # Draw each detection: bounding box, score and the five facial keypoints
    for box, score, keypoints in zip(boxes, scores, landms):
        cv2.rectangle(img, (int(box[0]), int(box[1])),
                      (int(box[2]), int(box[3])), (255, 0, 0), 2)
        cv2.putText(img, '%.2f' % score, (int(box[0]), int(box[1])),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 1)
        keypoints = np.reshape(keypoints, (-1, 2))

        cv2.circle(img, (int(keypoints[0][0]), int(keypoints[0][1])), 2,
                   (0, 0, 255), 2)
        cv2.circle(img, (int(keypoints[1][0]), int(keypoints[1][1])), 2,
                   (0, 255, 0), 2)
        cv2.circle(img, (int(keypoints[2][0]), int(keypoints[2][1])), 2,
                   (255, 255, 0), 2)
        cv2.circle(img, (int(keypoints[3][0]), int(keypoints[3][1])), 2,
                   (255, 0, 0), 2)
        cv2.circle(img, (int(keypoints[4][0]), int(keypoints[4][1])), 2,
                   (255, 0, 255), 2)

    return img


if __name__ == '__main__':
    detector = FaceDetector(model_path='./models/resnet18.pth',
                            gpu_ids=0,
                            layers=18)  # model_204904   model_262215
    img_paths = glob.glob('./images/*.jpg')
    for img_path in img_paths:
        image = cv2.imread(img_path)
        boxes, landms, scores = detector.detect(image)
        print(boxes, landms, scores)
        d_image = draw(image, boxes, scores, landms)
        cv2.imwrite('./d_images/' + os.path.basename(img_path), d_image)
Code example #17
# List of extensions to search for
extensions = ["jpg", "png", "gif", "jpeg"]

# Output to indicate program
# is running.
print("[X] Working...")

# Loop through all directories and subdirectories
# in the path entered
for subdir, dirs, files in os.walk(rootdir):
    # Loop through all files
    for f in files:
        # Check whether the file has an image extension
        for extension in extensions:
            if f.lower().endswith("." + extension):
                # Attempt to load our config file with haar classifier
                # path and throw exception if not found.
                try:
                    with open("./config.json", "r") as config_file:
                        haar = json.load(config_file)
                    detected = fd.detect(os.path.join(subdir, f), args["e"],
                                         haar["haar_path"])
                except (OSError, IOError):
                    sys.exit(
                        "[X] Error: Could not find config.json, please run again with --config-haar flag."
                    )

                if not detected or args["e"]:
                    os.remove(os.path.join(subdir, f))
Code example #18
class PeekabooController(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

        self.running = False

        self.soundCtrlr = SoundController()

        self.initVideo()

        self.whereIsEveryoneFlag = False
        self.iSeeSomeoneFlag = False
        self.failure = False
        self.record = False

    def initVideo(self):
        try:
            self.video = VideoStream(src=0)
        except:
            print("video stream not found")
            self.video = None
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except:
            print("face detector init failed")

        # choose xvid codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except:
            print("video writer not found")

        sleep(0.1)

    #called by the thread
    def run(self):
        self._start()

    def toggleRecord(self):
        if(self.record == True):
            self.record = False
            print("video recording stopped")
        else:
            self.record = True
            print("video recording started")

    # start looking
    def _start(self):
        self.running = True

        zeros = None

        previousX = 0
        direction = "NONE"

        self.writer = None
        (h, w) = (None, None)

        # run until the controller is stopped
        while (True):
            # capture frames from the camera
            if(self.running == True):
                frame = self.video.read()
                if(frame is None):
                    print("ERROR: cannot read frame from video, stopping Peekaboo. If you want Peekaboo to work, connect camera and restart R2.py")
                    self.failure = True
                    self.stop()
                    break

                # resize the frame and convert it to grayscale
                frame = self.resizeImage(frame, width=500)

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # detect faces in the image and then clone the frame
                # so that we can draw on it
                faceRects = self.faceDetector.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
                frameClone = frame.copy()

                # where is everyone?
                if len(faceRects) <= 0:
                    cv2.putText(frameClone, "WHERE IS EVERYONE?", (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    self.whereIsEveryone()
                else:
                    # peekaboo!
                    # R2 is happy to see someone
                    self.iSeeSomeone()

                # loop over the face bounding boxes and draw them
                for (fX, fY, fW, fH) in faceRects:
                    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (255, 0, 0), 2)

                    # only turn the head if the face drifts out of center
                    if (previousX - 10) < fX < (previousX + 10):
                        direction = "NONE"
                    elif fX <= (previousX - 10):
                        direction = "LEFT"
                    else:
                        direction = "RIGHT"

                    # turn R2's head to keep face centered
                    # if direction == "LEFT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight - 10)
                    # elif direction == "RIGHT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight + 10)

                    cv2.putText(frameClone, "PEEKABOO!".format(direction), (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.55, (0, 255, 0), 2)

                    if direction != "NONE":
                        cv2.putText(frameClone, "<Turn {}>".format(direction), (fX, fY - 0), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.55, (0, 255, 0), 2)

                    previousX = fX

                # show our detected faces, then clear the frame in preparation for the next frame
                # NOTE: comment this out if you don't want the video stream window to show in terminal
                cv2.imshow("Face", frameClone)

                # write video to file
                if self.record == True:
                    if self.writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frameClone.shape[:2]
                        self.writer = cv2.VideoWriter("r2_recording.avi", self.fourcc, 4,
                                                      (w, h), True)
                    output = np.zeros((h, w, 3), dtype="uint8")
                    output[0:h, 0:w] = frameClone
                    self.writer.write(output)

                # NOTE: comment this out if you don't want the video stream window to show in terminal
                # if the 'q' key is pressed, stop the loop
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    print("keypress 'q', stopping Peekaboo")
                    self.stop()
                    break

    def resume(self):
        print("starting PeekabooController")
        if(self.failure == True):
            print("ERROR: the video had failed to load.  If you want Peekaboo to work, you will need to connect the camera and restart R2.py")
            return
        self.running = True

    def stop(self):
        print("stopping PeekabooController")
        self.running = False
        if self.writer is not None:
            self.writer.release()

    def stopVideo(self):
        if(self.video is not None):
            self.video.stop()

        try:
            cv2.destroyAllWindows()
        except:
            pass

    def whereIsEveryone(self):
        if(self.whereIsEveryoneFlag == False):
            SoundController.worried(self.soundCtrlr)
            self.whereIsEveryoneFlag = True
            self.iSeeSomeoneFlag = False

    def iSeeSomeone(self):
        if(self.iSeeSomeoneFlag == False):
            SoundController.whistle(self.soundCtrlr)
            self.whereIsEveryoneFlag = False
            self.iSeeSomeoneFlag = True

    def resizeImage(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        # initialize the dimensions of the image to be resized and
        # grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, then return the
        # original image
        if width is None and height is None:
            return image

        # check to see if the width is None
        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)

        # otherwise, the height is None
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image
        resized = cv2.resize(image, dim, interpolation=inter)

        # return the resized image
        return resized
Code example #19
class Authenticator:
    def __init__(self):
        self.init()
        self.setPersons()

    def init(self):
        self.faceDetector = FaceDetector()
        self.faceEncoder = FaceEncoder()
        self.camera = Camera()

        self.faceImageArray = None
        self.faceEncoding = None

        self.personAuthenticated = None

    def setPersons(self):
        self.persons = []
        for fileName in os.listdir("Database"):
            if fileName == '.DS_Store':
                continue
            self.persons.append(Person(dirName=fileName))

    def setFace(self):
        # Start recording
        self.camera.start()
        face_locations = []
        startTime = time.time()
        maxWaitInSeconds = 5
        while len(face_locations) == 0:
            waitTime = time.time() - startTime
            if waitTime > maxWaitInSeconds:
                self.camera.stop()
                return False
            frame = self.camera.takePicture()

            # Detect faces
            face_locations = self.faceDetector.detect(frame,
                                                      resize=True,
                                                      resizeProportion=1)
            if len(face_locations) > 0:
                face_encodings = self.faceEncoder.encode(frame, face_locations)
        self.camera.stop()

        self.faceImageArray = frame
        self.faceEncoding = face_encodings[0]
        self.faceIdentificator = FaceIdentificator(self.persons,
                                                   self.faceEncoding)
        return True

    def getPersonFaceAuthenticated(self):
        return self.faceIdentificator.getPersonFaceAuthenticated()

    def authenticateFace(self):
        faceDetected = self.setFace()
        if not faceDetected:
            return None
        person = self.getPersonFaceAuthenticated()
        self.personAuthenticated = person
        return person  # if no face was authenticated, person is "Unknown"

    def authenticate(self, data):
        personDNI = Person(data)
        if self.personAuthenticated is None:
            return False
        return self.personAuthenticated.same(personDNI)