Example #1
    def long_running(self):
        print("Starting")
        self.stopped = False
        self.videoStream = cv2.VideoCapture(0)
        fd = FaceDetector(0.5)
        cl = Calibration()
        detected = False

        while not self.stopped:
            ret, frame = self.videoStream.read()
            if ret:
                self.pic.emit(frame)
                frame = fd.detect(frame)
                self.coordinates.emit((fd.startX, fd.startY, fd.endX, fd.endY))
                # lc.sendCoords(fd.startX, fd.startY, fd.endX, fd.endY)
                if fd.detected:
                    if not detected:
                        self.play.emit(1)
                    detected = True
                else:
                    if detected:
                        self.play.emit(2)
                    detected = False

                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb_image.shape
                bytesPerLine = ch * w
                convertToQtFormat = QtGui.QImage(rgb_image.data, w, h,
                                                 bytesPerLine,
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.image.emit(p)
        self.image.emit(QImage("resources/bg3.jpg"))
        self.finished.emit()
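
A typical way to drive a worker like this from the GUI thread is to move it onto a QThread and toggle the stopped flag to end the loop. A sketch under that assumption (the Worker name and signal wiring below are illustrative, not taken from the original project):

worker = Worker()
thread = QtCore.QThread()
worker.moveToThread(thread)
thread.started.connect(worker.long_running)
worker.finished.connect(thread.quit)
thread.start()
# Later, from the GUI thread: setting worker.stopped = True ends the capture loop.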
Example #2
    def initialize(self, ctx):
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        self.model_dir = model_dir
        self.device = torch.device("cuda:" +
                                   str(properties.get("gpu_id")) if torch.cuda.
                                   is_available() else "cpu")

        # Read model serialize/pt file
        onet_path = os.path.join(model_dir, "onet.pt")
        pnet_path = os.path.join(model_dir, "pnet.pt")
        rnet_path = os.path.join(model_dir, "rnet.pt")

        # Read model definition file
        model_def_path = os.path.join(model_dir, "FaceDetector.py")

        if not os.path.isfile(model_def_path):
            raise RuntimeError("Missing the model def file")

        # Load trained weights
        self.model = FaceDetector(device=self.device)
        self.model.onet.load_state_dict(
            torch.load(onet_path, map_location=self.device))
        self.model.pnet.load_state_dict(
            torch.load(pnet_path, map_location=self.device))
        self.model.rnet.load_state_dict(
            torch.load(rnet_path, map_location=self.device))
        self.model.eval()

        self.initialized = True
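
The initialize hook above only loads the networks; handlers in this style normally pair it with an inference entry point. A minimal, hypothetical counterpart (the detect call on self.model is an assumption about this FaceDetector, not confirmed by the source):

    def inference(self, img):
        # Run the detector with gradients disabled, on the device selected
        # in initialize().
        with torch.no_grad():
            return self.model.detect(img)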
Example #3
    def initVideo(self):
        self.video = None
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except Exception:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")

        # choose xvid codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")

        sleep(0.1)
Example #4
def main(rootDir):
    detector = FaceDetector()
    # os.walk yields every directory recursively; only its first yield lists
    # the subject folders directly under rootDir, so take that and stop.
    _, subjectDirs, _ = next(os.walk(rootDir))
    for subjectDir in subjectDirs:
        print('Walking subject folder at %s' % subjectDir)
        subjectPath = os.path.join(rootDir, subjectDir)
        # Each session
        _, sessionDirs, _ = next(os.walk(subjectPath))
        for sessionDir in sessionDirs:
            print('Walking sessions folder at %s' % sessionDir)
            sessionPath = os.path.join(subjectPath, sessionDir)
            img = cv2.imread(sessionPath + '/im0.bmp', 0)
            face_coords = detector.detect(img)
            for i in range(4):
                imgFilePath = sessionPath + '/im%s.bmp' % i
                imgCroppedFilePath = sessionPath + '/im%s_cropped.bmp' % i
                if os.path.isfile(imgFilePath):
                    print('Cropping the image %s' % imgFilePath)
                    img = cv2.imread(imgFilePath)
                    if face_coords is None:
                        print(
                            'Did not find a face, just using normal image'
                        )
                        cv2.imwrite(imgCroppedFilePath, img)
                    else:
                        face = detector.crop_face(img, face_coords)
                        cv2.imwrite(imgCroppedFilePath, face)
Example #5
    def init(self):
        self.faceDetector = FaceDetector()
        self.faceEncoder = FaceEncoder()
        self.camera = Camera()

        self.faceImageArray = None
        self.faceEncoding = None
Example #6
def make_prediction():
    themodels = train_models()
    #rec_eig = themodels[0]
    #fish = themodels[1]
    lbph = themodels[2]

    labels_dic = themodels[3]

    vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    time.sleep(2.0)  # give the camera a moment to warm up
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    faces_coord = detector.detect(frame)
    faces = normalize_faces(frame, faces_coord)
    if not len(faces):
        print('No face detected')
        return
    face = faces[0]

    #prediction, confidence = rec_eig.predict(face)
    #print ('Eigen faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    #prediction, confidence = fish.predict(face)
    #print ('Fisher Faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    prediction, confidence = lbph.predict(face)
    print('LBPH -> prediction: ' + labels_dic.get(prediction).capitalize() +
          " Confidence: " + str(round(confidence)))
Example #7
def make_prediction():
    themodels = train_models()
    #rec_eig = themodels[0]
    #fish = themodels[1]
    lbph = themodels[2]

    labels_dic = themodels[3]

    webcam = VideoCamera()
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    frame = webcam.get_frame()
    faces_coord = detector.detect(frame)
    faces = normalize_faces(frame, faces_coord)
    if not len(faces):
        print('No face detected')
        del webcam
        return
    face = faces[0]

    plt.imshow(face)
    plt.show()
    del webcam

    #prediction, confidence = rec_eig.predict(face)
    #print ('Eigen faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    #prediction, confidence = fish.predict(face)
    #print ('Fisher Faces -> prediction: ' + labels_dic.get(prediction).capitalize() + " Confidence: " + str(round(confidence)))

    prediction, confidence = lbph.predict(face)
    print('LBPH -> prediction: ' + labels_dic.get(prediction).capitalize() +
          " Confidence: " + str(round(confidence)))
Example #8
def collectImages():
    folder = "people/" + raw_input('Person: ').lower()
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)

    if not os.path.exists(folder):
        detector = FaceDetector('haarcascade_frontalface_default.xml')
        vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
        time.sleep(2.0)
        os.makedirs(folder)
        counter = 0
        timer = 0
        while counter < 13:
            frame = vs.read()
            #frame = imutils.resize(frame, width=600)
            faces_coord = detector.detect(frame)
            # timer advances 50 per frame, so this saves at most one face
            # every 14th frame
            if len(faces_coord) and timer % 700 == 50:
                faces = normalize_faces(frame, faces_coord)
                cv2.imwrite(folder + '/' + str(counter) + '.jpg', faces[0])
                print("Images saved: " + str(counter))
                counter += 1
            draw_rectangle(frame, faces_coord)
            cv2.imshow('Frame', frame)
            key = cv2.waitKey(1) & 0xFF
            timer += 50

            if key == ord("q"):
                break

        cv2.destroyAllWindows()
        vs.stop()
        runMenu()
    else:
        print("Name already taken")
        runMenu()
Example #9
    def run(self):
        print("Starting Detection")
        self.stopped = False
        fd = FaceDetector(0.5)
        detected = False
        # lc = LaserController(640,480)

        while not self.stopped:

            if len(self.frame) > 0:

                frame = fd.detect(self.frame)
                self.coordinates.emit((fd.startX, fd.startY))
                # lc.sendCoords(fd.startX, fd.startY, fd.endX, fd.endY)
                if fd.detected:
                    if not detected:
                        self.play.emit(1)
                    detected = True
                else:
                    if detected:
                        self.play.emit(2)
                    detected = False

                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb_image.shape
                bytesPerLine = ch * w
                convertToQtFormat = QtGui.QImage(rgb_image.data, w, h,
                                                 bytesPerLine,
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.image.emit(p)
        self.image.emit(QImage("resources/bg3.jpg"))
        self.finished.emit()
Example #10
def main():
    # Instantiate Classes
    detector = FaceDetector(FACE_CLASSIFIER_PATH, EYE_CLASSIFIER_PATH)
    model = FaceModel()
    display = Display()
    capture = Capture()

    oldTime = time.time()
    i = 0
    subprocess.call(['speech-dispatcher'])

    while True:
        # Calculate time difference (dt), update oldTime variable
        newTime = time.time()
        dt = newTime - oldTime
        oldTime = newTime

        # Grab Frames
        frames = capture.read()

        # Run full face detection every 10th frame, eye detection every frame
        if i % 10 == 0:
            rects = detector.detect(frames)
        else:
            rects = detector.detect(frames, model.getPreviousFaceRects())
        i += 1

        # Add detected rectangles to model
        model.add(rects)

        # Render
        display.renderScene(frames['display'], model, rects)
        display.renderEyes(frames['color'], model)
Example #11
    def test_main(self):
        detector = FaceDetector('./data/test/2.png')
        results = detector.results()
        self.assertEqual(len(results), 1)

        result = results[0]
        self.assertEqual(result, [144, 36, 144, 144])  # x, y, w, h
Example #12
    def loadFaceDetector(self):
        """ Init and load FaceDetector class """
        self.setProgressDialog("Load Face Detector")
        self.faceDetector = FaceDetector(self.face_model, self.snapshot,
                                         self.gpu_id)
        self.confidenceSlider.setValue(self.faceDetector.confThreshold * 100)
        self.faceDetector.load(self.updateProgressDialog)
        self.videoManager.faceDetector = self.faceDetector
Example #13
def build_data_set(url=0, win_name="live!"):
    # Face Detection using Haar Cascades:
    # Goal: We will see the basics of face detection using Haar Feature-based Cascade Classifiers
    # Basics: Each feature is a single value obtained by subtracting sum of pixels under the white rectangle from sum
    #         of pixels under the black rectangle.
    # OpenCV already contains many pre-trained classifiers for face, eyes, smiles, etc.
    # Those XML files are stored in the opencv/data/haarcascades/ folder.
    # It is a machine learning based approach where a cascade function is trained from a lot of positive and
    # negative images. It is then used to detect objects in other images.
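    # A minimal sketch of what a Haar-cascade detect() presumably does under
    # the hood (an assumption about this FaceDetector wrapper, not taken from
    # its source):
    #     cascade = cv2.CascadeClassifier(
    #         cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #     boxes = cascade.detectMultiScale(gray, scaleFactor=1.3,
    #                                      minNeighbors=5)  # (x, y, w, h) boxes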

    video = VideoCamera(url)
    detector = FaceDetector('haarcascade_frontalface_default.xml')
    cv2.namedWindow(win_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(win_name, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    name = ""
    number_of_pic = 10

    print("Enter Your name")
    while True:
        frame = video.get_frame()
        height, width, _ = frame.shape

        cv2.imshow(win_name, frame)

        key = cv2.waitKey(40) & 0xFF
        if key not in [8, 13, 27, 255]:
            name += chr(key)
            print(name)
        elif key == 8:
            name = name[:-1]
        elif key == 27:
            cv2.destroyAllWindows()
            return
        elif key == 13:
            folder = "people/" + name.lower()  # input name
            break

    if not os.path.exists(folder):
        os.mkdir(folder)
    init_pic = len(os.listdir(folder))
    counter = init_pic
    timer = 0
    while counter < number_of_pic + init_pic:  # take 10 photos
        frame = video.get_frame()
        faces_coordinates = detector.detect(frame)
        if len(faces_coordinates) and timer % 700 == 50:
            faces = normalize_faces(frame, faces_coordinates)
            cv2.imwrite(folder + '/' + str(counter) + '.jpg', faces[0])
            counter += 1
        cv2.imshow(win_name, frame)
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        timer += 50
    cv2.destroyAllWindows()
Example #14
    def init(self):
        self.faceDetector = FaceDetector()
        self.faceEncoder = FaceEncoder()
        self.camera = Camera()

        self.faceImageArray = None
        self.faceEncoding = None

        self.personAuthenticated = None
Example #15
    def __init__(self):
        self.current_image = 0
        self.file_list = []
        self.IMAGE_WIDTH = 640
        self.IMAGE_HEIGHT = 480
        self.face_detector = FaceDetector()
        self.mask_classifier = MaskClassifier()
        self.image_presenter = ImagePresenter()
        self.createUI()
Example #16
def main(leftpath, rightpath):

    imgL = cv2.imread(leftpath)
    imgR = cv2.imread(rightpath)

    imgL = cv2.pyrDown(imgL)
    imgR = cv2.pyrDown(imgR)

    detector = FaceDetector()
    lbp = LBP()

    face_coords = detector.detect(imgL)
    if face_coords is None:
        print('No face detected')
        return
    x, y, w, h = face_coords
    # Pad the detected box generously (may need clamping to image bounds)
    x -= 400
    y -= 400
    w += 800
    h += 800

    # imgL = detector.crop_face(imgL, (x, y, w, h))
    # imgR = detector.crop_face(imgR, (x, y, w, h))

    # cv2.imwrite('left.png', imgL)
    # cv2.imwrite('right.png', imgR)

    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp

    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=5,
                                   P1=8 * 3 * window_size**2,
                                   P2=32 * 3 * window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)

    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    h, w = imgL.shape[:2]
    f = 0.8 * w  # guess for focal length
    Q = np.float32([
        [1, 0, 0, -0.5 * w],
        [0, -1, 0, 0.5 * h],  # turn points 180 deg around x-axis,
        [0, 0, 0, -f],  # so that y-axis looks up
        [0, 0, 1, 0]
    ])

    points = cv2.reprojectImageTo3D(disp, Q)

    out_points = points  # [mask]
    out_points = out_points[:, :, 2]
    print(out_points)
    hist, bins = lbp.run(out_points, False)
    print('hist', hist)
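
For orientation, reprojectImageTo3D applies the homogeneous mapping [X, Y, Z, W] = Q @ [x, y, d, 1] and divides by W. With the Q built above, W = d and Z = -f, so the depth channel printed at the end is -f/d per pixel. A scalar sketch of that relationship (illustrative, not part of the original code):

def depth_from_disparity(d, f):
    # Under the Q matrix above, Z = -f and W = d, so reprojected depth
    # is Z / W = -f / d (negative because of the 180-degree flip).
    return -f / d if d else float('inf')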
Example #17
    def build(self):
        #initialize the FaceDetector by calculating face encodings for all available skype pictures
        self.face_detector = FaceDetector()
        self.face_detector.get_face_encodings()

        self.root = BoxLayout(orientation='vertical')
        """create the first horizontal box with a camera object on the left and a resulting picture on the right"""
        self.horizontalBox1 = BoxLayout(orientation='horizontal')

        # Create a camera object
        self.cameraObject = Camera(play=False)
        self.cameraObject.play = True
        #self.cameraObject.resolution = (1024, 1024)  # Specify the resolution
        #self.cameraObject.size = (500, 500)

        self.im1 = Image(source="input_image.png")

        # Add the camera preview on the left and the result image on the
        # right, matching the layout described above
        self.horizontalBox1.add_widget(self.cameraObject)
        self.horizontalBox1.add_widget(self.im1)
        """create the second horizontal box with resulting text"""
        self.horizontalBox2 = Label(
            text="Which colleague looks most like you!?", font_size='20sp')
        """create the third horizontal box with a button to start the lookalike process"""
        self.horizontalBox3 = BoxLayout(orientation='horizontal')

        # Create a button for taking photograph
        self.cameraButton = Button(text="Show me my lookalike!",
                                   size_hint=(.3, .3),
                                   background_normal='',
                                   background_color=(255, 0, 0, 0.2),
                                   pos_hint={
                                       'center_x': 0.5,
                                       'center_y': 0.5
                                   })
        # bind the button's on_press to onCameraClick
        self.cameraButton.bind(on_press=self.onCameraClick)
        self.horizontalBox3.add_widget(self.cameraButton)
        """create the second horizontal box with a button to start the lookalike process"""
        self.horizontalBox3 = BoxLayout(orientation='horizontal')

        # Create a button for taking photograph
        self.cameraButton = Button(text="Show me my lookalike!",
                                   size_hint=(.3, .3),
                                   pos_hint={
                                       'center_x': 0.5,
                                       'center_y': 0.5
                                   })
        # bind the button's on_press to onCameraClick
        self.cameraButton.bind(on_press=self.onCameraClick)
        self.horizontalBox3.add_widget(self.cameraButton)
        """add all boxes to the root of the Kivy app"""
        self.root.add_widget(self.horizontalBox1)
        self.root.add_widget(self.horizontalBox2)
        self.root.add_widget(self.horizontalBox3)

        # return the root widget
        return self.root
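
The bound onCameraClick handler is not shown in this example; a plausible minimal version, assuming it snapshots the Kivy camera widget and refreshes the displayed image (both the export path and this wiring are assumptions):

    def onCameraClick(self, instance):
        # Hypothetical handler: save the current camera frame, then refresh
        # the displayed image; the lookalike matching step would go here.
        self.cameraObject.export_to_png("input_image.png")
        self.im1.reload()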
Example #18
    def __init__(self):
        self.__video = cv2.VideoCapture(0)
        self.__detector = FaceDetector()
        self.__count_frames = 0
        self.__last_second = 0
        self.__total_frames = 0
        self.__is_video_running = False
        self.__numpy_frame = None
        self.__now = None
        self.__face_detector = FaceDetector()
Example #19
def main():
    detector = FaceDetector(model_folder='model',
                            ctx=mx.cpu(0),
                            num_worker=4,
                            accurate_landmark=False)

    OriginFace = cv2.imread('face.jpg', cv2.IMREAD_COLOR)
    ResultOrigin = detector.detect_face(OriginFace)
    P = ResultOrigin[1][0]
    origin_points = np.array([(int(P[0]), int(P[5])), (int(P[1]), int(P[6])),
                              (int(P[2]), int(P[7])), (int(P[3]), int(P[8])),
                              (int(P[4]), int(P[9]))])
    swap.initSwappingModule(OriginFace, origin_points)

    camera = cv2.VideoCapture(0)
    while True:
        grab, frame = camera.read()
        if not grab:
            break
        img = cv2.resize(frame, (320, 180))

        t1 = time.time()
        results = detector.detect_face(img)
        print('time: ', time.time() - t1)

        if results is None:
            continue

        total_boxes = results[0]
        points = results[1]

        draw = img.copy()
        for b in total_boxes:
            cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])),
                          (255, 255, 255))

        result = draw.copy()

        for p in points:
            for i in range(5):
                # Cast to int: OpenCV drawing functions reject float coords
                cv2.circle(draw, (int(p[i]), int(p[i + 5])), 1, (255, 0, 0), 2)

            origin_points = np.array([(int(p[0]), int(p[5])),
                                      (int(p[1]), int(p[6])),
                                      (int(p[2]), int(p[7])),
                                      (int(p[3]), int(p[8])),
                                      (int(p[4]), int(p[9]))])
            result = swap.swap(result, origin_points)
            cv2.imshow("FaceSwap", result)

        cv2.imshow("detection result", draw)

        if cv2.waitKey(1) & 0xFF == ord('x'):
            break
    camera.release()
    cv2.destroyAllWindows()
Example #20
    def __init__(self, pathOfImage):
        self.humanFaceDetector = FaceDetector()
        self.dogDetector = DogDetector()

        self.facesInImage = self.humanFaceDetector.faces(pathOfImage)
        targetImage = image.load_img(pathOfImage, target_size=(224, 224))
        imageArray = image.img_to_array(targetImage)
        self.imageTensor = np.expand_dims(imageArray, axis=0)
        self.imageContainsADog = self.dogDetector.containsADog(
            self.imageTensor)
        with open("dogBreedNames.txt", "rb") as fp:
            self.dogBreedNames = pickle.load(fp)
Example #21
def live_recognition():
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    webcam = VideoCamera(0)
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)
    models = train_models()

    lbph = models[2]
    labels_dic = models[3]

    while True:
        frame = webcam.get_frame()
        faces_coord = detector.detect(frame, True)  # detects more than one face

        if len(faces_coord) > 0:
            faces = normalize_faces(frame, faces_coord)  # normalize
            for i, face in enumerate(faces):  # for each detected face
                pred, conf = lbph.predict(face)
                threshold = 45
                print("Prediction: " + labels_dic[pred].capitalize() +
                      '\nConfidence: ' + str(round(conf)))
                clear_output(wait=True)

                if conf < threshold:
                    cv2.putText(frame,
                                labels_dic[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 10),
                                cv2.FONT_HERSHEY_PLAIN,
                                3,
                                (66, 53, 243),
                                2,
                                cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2)

            draw_rectangle(frame, faces_coord)
            cv2.imshow('Frame', frame)
            if cv2.waitKey(40) & 0xFF == 27:
                del frame
                cv2.destroyAllWindows()
                break
        else:
            cv2.putText(frame, "ESC to exit", (5, frame.shape[0] - 5),
                        cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2,
                        cv2.LINE_AA)
            cv2.imshow('Frame', frame)
            if cv2.waitKey(40) & 0xFF == 27:
                del frame
                cv2.destroyAllWindows()
                break
Example #22
def index():
    if request.method == 'GET':
        try:
            gcp_test(storage_client)
            return "Success"
        except Exception:
            return "Something went wrong"
    else:
        filename = request.form['filename']
        bucketname = "uofthacksvii"

        download_blob(storage_client, bucketname, filename, filename)

        # Lip tracking + model inference
        input_array = FaceDetector(filename)
        print(input_array.shape)
        prediction = evaluate_model(input_array).replace('-', ' ').capitalize()

        # Construct gcp url so frontend can stream it to webpage
        gcp_url = "https://storage.cloud.google.com/%s/%s" % (bucketname,
                                                              filename)

        out_dict = {"url": gcp_url, "text": prediction}

        # Emit the url on the socket - frontend should be listening to this
        socketio.emit("FromAPI", out_dict)

        return "Success"
Example #23
    def long_running(self):
        print("Starting")
        self.stopped = False
        self.videoStream = cv2.VideoCapture(0)
        fd = FaceDetector(0.5)
        cl = Calibration()
        detected = False
        # lc = LaserController(640,480)

        while not self.stopped:
            ret, frame = self.videoStream.read()
            if ret:
                # frame = cl.mapImage(frame)
                # https://stackoverflow.com/a/55468544/6622587
                # frame = fd.detect(frame)
                self.frame.emit(frame)
                # self.coordinates.emit((fd.startX, fd.startY))
                # lc.sendCoords(fd.startX, fd.startY, fd.endX, fd.endY)
                # if fd.detected:
                #     if not detected:
                #         self.play.emit(1)
                #     detected = True
                # else:
                #     if detected:
                #         self.play.emit(2)
                #     detected = False

                # rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # h, w, ch = rgb_image.shape
                # bytesPerLine = ch * w
                # convertToQtFormat = QtGui.QImage(rgb_image.data, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
                # p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                # self.image.emit(p)
        # self.image.emit(QImage("resources/bg3.jpg"))
        self.finished.emit()
Example #24
    def __init__(self,
                 frame,
                 EYE_AR_THRESH=0.3,
                 ROLL_THRESH=20,
                 TIME_THRESH=10):

        self.EYE_AR_THRESH = EYE_AR_THRESH
        self.ROLL_THRESH = ROLL_THRESH
        self.TIME_THRESH = TIME_THRESH
        self.ALARM_ON = False
        self.T = None

        self.faceDetector = FaceDetector()
        self.eyeDetector = EyeDetector()
        self.markDetector = MarkDetector(self.faceDetector)

        # Setup process and queues for multiprocessing.
        self.img_queue = Queue()
        self.box_queue = Queue()
        self.img_queue.put(frame)
        self.box_process = Process(target=self.get_face,
                                   args=(self.markDetector, ))
        self.box_process.start()

        h, w = frame.shape[:2]
        self.poseEstimator = PoseEstimator(img_size=(h, w))
        self.pose_stabilizers = [
            Stabilizer(state_num=2,
                       measure_num=1,
                       cov_process=0.1,
                       cov_measure=0.1) for _ in range(6)
        ]
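
The get_face target passed to the Process above is not shown; a plausible minimal worker, assuming it shuttles frames from img_queue to face boxes on box_queue (the extract_cnn_facebox name is an assumption about this MarkDetector):

    def get_face(self, markDetector):
        # Hypothetical worker loop: consume frames, produce face boxes.
        while True:
            image = self.img_queue.get()
            box = markDetector.extract_cnn_facebox(image)
            self.box_queue.put(box)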
Example #25
    def __init__(self, idx, window):
        self.idx = idx
        self.update_flag = True
        self.info_label = tk.Label(window,
                                   text="Monitor [ %d ] @ %2d (fps)" %
                                   (idx + 1, 0),
                                   font=('Arial', 10))
        self.info_label.grid(row=3, column=idx + 1)
        # PIL needs an 8-bit array here; np.full defaults to int64
        img = Image.fromarray(np.full((240, 320), 100, dtype=np.uint8))
        img_tk = ImageTk.PhotoImage(img)
        self.frame_label = tk.Label(window)
        self.frame_label.configure(image=img_tk)
        self.frame_label.image = img_tk  # keep a reference so Tk does not garbage-collect it
        self.frame_label.grid(row=4, column=idx + 1, ipadx=5, ipady=10)
        self.start_btn = tk.Button(window,
                                   text='Start',
                                   command=self.click_button,
                                   width=39,
                                   font=('Arial', 10))
        self.start_btn.grid(row=5, column=idx + 1)
        self.split_btn = tk.Button(window,
                                   text='Split This Window',
                                   command=self.split_window,
                                   width=39,
                                   font=('Arial', 10))
        self.split_btn.grid(row=6, column=idx + 1)
        self.face_detect_flag = tk.BooleanVar()
        self.face_detect_checkbutton = tk.Checkbutton(
            window,
            text="Open Face Detection   (Detect face in Split window)",
            variable=self.face_detect_flag)
        self.face_detect_checkbutton.grid(row=7,
                                          column=idx + 1,
                                          sticky='w',
                                          ipadx=2)
        self.face_save_flag = tk.BooleanVar()
        self.face_save_checkbutton = tk.Checkbutton(
            window,
            text="Save Face Image     (Save face when face detected)",
            variable=self.face_save_flag)
        self.face_save_checkbutton.grid(row=8,
                                        column=idx + 1,
                                        sticky='w',
                                        ipadx=2)
        self.face_detector = FaceDetector()
        self.max_face_store_number = 200
        self.thread_list = []
        self.tcp = None
Example #26
class DogBreedClassifier:
    def __init__(self, pathOfImage):
        self.humanFaceDetector = FaceDetector()
        self.dogDetector = DogDetector()

        self.facesInImage = self.humanFaceDetector.faces(pathOfImage)
        targetImage = image.load_img(pathOfImage, target_size=(224, 224))
        imageArray = image.img_to_array(targetImage)
        self.imageTensor = np.expand_dims(imageArray, axis=0)
        self.imageContainsADog = self.dogDetector.containsADog(
            self.imageTensor)
        with open("dogBreedNames.txt", "rb") as fp:
            self.dogBreedNames = pickle.load(fp)

    def predictBreedInImage(self):
        if self.imageContainsADog:
            prediction = self.predictBreed(self.imageTensor)
            if prediction[0][1] > 0.7:
                return (
                    "Hi! The dog in the image is {}. Prediction confidence: {:.2f}%"
                    .format(prediction[0][0], prediction[0][1] * 100.))
            else:
                return (
                    "Hi! The dog in the picture looks like {:.2f}% {} and {:.2f}% {}"
                    .format(prediction[0][1] * 100., prediction[0][0],
                            prediction[1][1] * 100., prediction[1][0]))
        elif len(self.facesInImage) > 0:
            prediction = self.predictBreed(self.imageTensor)
            return ("Hi Human! You look like {:.2f}% {} and {:.2f}% {}".format(
                prediction[0][1] * 100., prediction[0][0],
                prediction[1][1] * 100., prediction[1][0]))
        else:
            return (
                'Sorry, we currently offer our service only to dogs and humans who look like dogs. If you are a self-obsessed cat or something, stay tuned for our updates!!'
            )

    def predictBreed(self, imageTensor):
        bottleneck_feature = VGG19(weights='imagenet',
                                   include_top=False).predict(
                                       preprocess_input(imageTensor))

        VGG19_model = Sequential()
        VGG19_model.add(GlobalAveragePooling2D(input_shape=(7, 7, 512)))
        VGG19_model.add(Dropout(0.25))
        VGG19_model.add(BatchNormalization())
        VGG19_model.add(Activation('elu'))
        VGG19_model.add(Dense(133, activation='softmax'))
        VGG19_model.load_weights('saved_models/weights.best.VGG19.hdf5')

        predicted_vector = VGG19_model.predict(bottleneck_feature)
        predictionLikelihoodOrder = np.argsort(-predicted_vector)

        prediction = []
        for predictionIndex in predictionLikelihoodOrder:
            prediction.append((self.dogBreedNames[predictionIndex[0]],
                               predicted_vector.item(predictionIndex[0])))
        return prediction
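
A hypothetical usage of the class above (the image path is an assumption; the weights file and dogBreedNames.txt referenced in __init__ must exist):

classifier = DogBreedClassifier("images/sample.jpg")  # path is illustrative
print(classifier.predictBreedInImage())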
Example #27
    def __init__(self, mark_model='models/landmark_detector/frozen_inference_graph.pb'):
        '''Initialize MarkDetector.
        Input: mark_model --- path to the tensorflow model'''
        self.face_detector = FaceDetector()  # Initialize face detector
        self.cnn_input_size = 128  # CNN input dimension
        self.marks = None

        # Get a tensorflow session ready for landmark detection:
        # load a frozen tensorflow model into memory
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(mark_model, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.graph = detection_graph
        self.sess = tf.Session(graph=detection_graph)
Example #28
def live_recognition():
    models = train_models()
    lbph = models[2]
    labels_dic = models[3]
    threshold = 100
    detector = FaceDetector("haarcascade_frontalface_default.xml")
    vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    time.sleep(2.0)

    while True:

        frame = vs.read()
        #frame = imutils.resize(frame, width=600)
        faces_coord = detector.detect(frame, True)

        if len(faces_coord) > 0:
            faces = normalize_faces(frame, faces_coord)
            for i, face in enumerate(faces):
                pred, conf = lbph.predict(face)
                print("Prediction: " + labels_dic[pred].capitalize() +
                      '\nConfidence: ' + str(round(conf)))

                if conf < threshold:
                    cv2.putText(
                        frame, labels_dic[pred].capitalize(),
                        (faces_coord[i][0], faces_coord[i][1] - 10),
                        cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2,
                        cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 3, (66, 53, 243), 2)

            draw_rectangle(frame, faces_coord)

        # Show the frame and poll the keyboard once per iteration so "q"
        # exits the outer loop even when no face is found
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()
    runMenu()
Example #29
def main():
    bino_cam = BinoCamera(0, True)
    bino_cam.set_width_height((2560, 720))
    face_detector = FaceDetector()
    # The width of frame is the half of camera width.
    distance_predictor = DistancePredictor(90, bino_cam.get_width_height()[0] / 2,
                                           bino_cam.get_width_height()[1], 60)
    distance = 0
    distance_threshold = 2000.0  # mm
    monitor_flag = True

    while True:
        if not bino_cam.capture():
            break

        Drawer.draw_lines(bino_cam.get_lframe())
        Drawer.draw_lines(bino_cam.get_rframe())
        left_face = face_detector.detect_face(bino_cam.get_lframe())
        right_face = face_detector.detect_face(bino_cam.get_rframe())

        if left_face:
            Drawer.dotted_rectangle(bino_cam.get_lframe(), (left_face.get_x(), left_face.get_y()),
                                    (left_face.get_x() + left_face.get_width(),
                                     left_face.get_y() + left_face.get_height()), (0, 0, 255), 2)

        if right_face:
            Drawer.dotted_rectangle(bino_cam.get_rframe(), (right_face.get_x(), right_face.get_y()),
                                    (right_face.get_x() + right_face.get_width(),
                                     right_face.get_y() + right_face.get_height()), (0, 0, 255), 2)

        if left_face and right_face:
            distance = distance_predictor.predict_distance(
                (left_face.get_x(), left_face.get_y()),
                (right_face.get_x(), right_face.get_y()))
        Drawer.text(bino_cam.get_lframe(), "Distance(m): %.2fm" % (distance / 1000),
                    (20, 40), 1.0, (0, 0, 255), 2)
        print(distance / 1000)

        if 0 < distance < distance_threshold and monitor_flag:
            Controller.set_top_window("Unity 2019.1.8f1 - mainScene.unity - Learn unity editor script - PC, Mac & Linux Standalone <DX11>")
            monitor_flag = False

        cv2.imshow("I am a good stuff v1.0 @ FPS: %d" % bino_cam.get_fps(), np.hstack((bino_cam.get_lframe(), bino_cam.get_rframe())))
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
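
For reference, a binocular distance estimate of this kind typically reduces to depth = f_px * baseline / disparity, with the focal length in pixels derived from the horizontal field of view. A hedged sketch using the parameters passed to DistancePredictor above (90-degree FOV, 60 mm baseline; the helper itself is hypothetical):

import math

def estimate_distance_mm(x_left, x_right, frame_width_px, fov_deg=90, baseline_mm=60):
    # Focal length in pixels from the horizontal field of view.
    f_px = (frame_width_px / 2) / math.tan(math.radians(fov_deg) / 2)
    disparity = abs(x_left - x_right)  # horizontal pixel shift between views
    return f_px * baseline_mm / disparity if disparity else float('inf')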
Example #30
def collectImages():
    folder = "people/" + input('Person: ').lower()
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)
    cap = VideoCamera()
    detector = FaceDetector('haarcascade_frontalface_default.xml')

    if not os.path.exists(folder):
        os.makedirs(folder)
        counter = 0
        timer = 0

        while counter < 10:
            try:
                frame = cap.get_frame()
                bgra = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)

                faces_coord = detector.detect(bgra)
                if len(faces_coord) and timer % 700 == 50:
                    faces = normalize_faces(frame, faces_coord)
                    cv2.imwrite(folder + "/" + str(counter) + ".jpg", faces[0])
                    print("Images saved: " + str(counter))
                    counter += 1

                draw_rectangle(frame, faces_coord)
                cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
                cv2.imshow('frame', frame)
                cv2.waitKey(50)
                timer += 50
            except KeyboardInterrupt:
                print("Live Video interrupted")
                break
    else:
        print("This name is already taken")

    # Release the camera and close windows exactly once
    del cap
    cv2.destroyAllWindows()
Example #31
from FaceDetector import FaceDetector
import cv2

a = cv2.imread('testimg.jpg')
c = FaceDetector(a)

d = c.get_faces()

cv2.imshow('img', d[1])
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #32
import numpy as np
import cv2
from FaceDetector import FaceDetector

cap = cv2.VideoCapture(0)

c = FaceDetector()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    if ret:
        c.update_img(frame)
        d = c.get_faces()
    else:
        d = (0, frame)

    cv2.imshow('frame', d[1])
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()