Example #1
    def update(self):
        """
        Update the real time prediction on the screen
        """
        self.counter += 1
        camera_open, frame = self.video.getFrame()
        # run the face detector only on every 10th frame to keep the UI responsive
        if camera_open and self.counter % 10 == 0:
            self.counter = 0
            image, predictions = detect_face(frame, self.model, self.detector,
                                             self.predictor)
            if len(predictions) > 0:
                emotion = predictions[0]
                # FER2013: 0 = Angry, 1 = Disgust, 2 = Fear, 3 = Happy, 4 = Sad, 5 = Surprise, 6 = Neutral
                self.angerLabel["text"] = "angry:" + str(round(emotion[0], 3))
                self.disgustLabel["text"] = "disgust:" + str(
                    round(emotion[1], 3))
                self.fearLabel["text"] = "fear:" + str(round(emotion[2], 3))
                self.happyLabel["text"] = "happy:" + str(round(emotion[3], 3))
                self.sadnessLabel["text"] = "sad:" + str(round(emotion[4], 3))
                self.surpriseLabel["text"] = "surprise:" + str(
                    round(emotion[5], 3))
                self.neutralLabel["text"] = "neutral:" + str(
                    round(emotion[6], 3))

        if camera_open:
            self.cur_frame = ImageTk.PhotoImage(image=Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.cur_frame, anchor=NW)

        self.window.after(30, self.update)  # schedule the next call; ~30 ms between refreshes
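
The loop above assumes a small wrapper around OpenCV's VideoCapture whose getFrame() returns a success flag and an RGB frame. A minimal sketch of such a wrapper (the class name and the BGR-to-RGB conversion are assumptions, not shown in the example):

import cv2 as cv

class MyVideoCapture:
    """Hypothetical capture wrapper assumed by update()."""

    def __init__(self, source=0):
        self.cap = cv.VideoCapture(source)

    def getFrame(self):
        # returns (ok, rgb_frame); Tkinter/PIL expect RGB, OpenCV reads BGR
        ok, frame = self.cap.read()
        if not ok:
            return False, None
        return True, cv.cvtColor(frame, cv.COLOR_BGR2RGB)

    def __del__(self):
        if self.cap.isOpened():
            self.cap.release()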
Example #2
    def snapshot(self):
        """
        Capture the current frame (with the emoji overlay) and save it
        as a timestamped JPEG.
        """
        captured, frame = self.video.getFrame()
        if captured:
            # draw the detection result on the frame before saving
            frame, predictions = detect_face(frame, self.model, self.detector,
                                             self.predictor)
            filename = "IMG-" + time.strftime("%H-%M-%S-%d-%m") + ".jpg"
            cv.imwrite(filename, cv.cvtColor(frame, cv.COLOR_BGR2RGB))
            Label(self.window, text="image saved").place(x=430, y=510)
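
The save step can also be factored into a standalone helper; a hypothetical version of the body above (save_snapshot is not part of the original code):

import time
import cv2 as cv

def save_snapshot(frame_rgb, prefix="IMG"):
    """Save an RGB frame as a timestamped JPEG and return the filename."""
    filename = "%s-%s.jpg" % (prefix, time.strftime("%H-%M-%S-%d-%m"))
    # imwrite expects BGR channel order, so convert back from RGB
    cv.imwrite(filename, cv.cvtColor(frame_rgb, cv.COLOR_RGB2BGR))
    return filename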
Example #3
    def get_stats(self):
        """
        Get the predicted emotion probabilities and display them on the labels.
        """
        camera_open, frame = self.video.getFrame()
        if not camera_open:
            return
        image, predictions = detect_face(frame, self.model, self.detector,
                                         self.predictor)
        if len(predictions) > 0:
            emotion = predictions[0]
            # FER2013: 0 = Angry, 1 = Disgust, 2 = Fear, 3 = Happy,
            # 4 = Sad, 5 = Surprise, 6 = Neutral
            self.angerLabel["text"] = "angry:" + str(emotion[0])
            self.disgustLabel["text"] = "disgust:" + str(emotion[1])
            self.fearLabel["text"] = "fear:" + str(emotion[2])
            self.happyLabel["text"] = "happy:" + str(emotion[3])
            self.sadnessLabel["text"] = "sad:" + str(emotion[4])
            self.surpriseLabel["text"] = "surprise:" + str(emotion[5])
            self.neutralLabel["text"] = "neutral:" + str(emotion[6])
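
predictions[0] is a 7-element probability vector in FER2013 class order. A small hypothetical helper (not in the original code) to collapse it into a single label:

import numpy as np

# FER2013 class order, matching the indexing used above
FER2013_LABELS = ["Angry", "Disgust", "Fear", "Happy",
                  "Sad", "Surprise", "Neutral"]

def top_emotion(emotion):
    """Return (label, probability) for the highest-scoring class."""
    idx = int(np.argmax(emotion))
    return FER2013_LABELS[idx], float(emotion[idx])

# top_emotion([0.1, 0.0, 0.1, 0.6, 0.1, 0.05, 0.05]) -> ("Happy", 0.6)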
Example #4
                    print('Read data dimension: ', img.ndim)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim < 2:
                        print('Error! Unable to align "%s"' % image_path)
                        text_file.write('%s\n' % (output_filename))
                        continue
                    if img.ndim == 2:
                        img = facenet.to_rgb(img)
                        print('to_rgb data dimension: ', img.ndim)
                    img = img[:, :, 0:3]  # drop the alpha channel if present
                    print('After data dimension: ', img.ndim)

                    bounding_boxes, _ = detect_face.detect_face(
                        img, minsize, pnet, rnet, onet, threshold, factor)
                    nrof_faces = bounding_boxes.shape[0]
                    print('Number of Detected Face(s): %d' % nrof_faces)
                    if nrof_faces > 0:
                        det = bounding_boxes[:, 0:4]
                        img_size = np.asarray(img.shape)[0:2]
                        if nrof_faces > 1:
                            # several faces: score each box by its area and
                            # its squared distance from the image center
                            bounding_box_size = (det[:, 2] - det[:, 0]) * (
                                det[:, 3] - det[:, 1])
                            img_center = img_size / 2
                            offsets = np.vstack([
                                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
                            ])
                            offset_dist_squared = np.sum(
                                np.power(offsets, 2.0), 0)
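
The excerpt is cut off mid-heuristic. In the facenet alignment script this code appears to come from, the candidates are then reduced to a single box by trading off area against squared distance from the image center, roughly as follows (a sketch of the missing continuation, not part of the excerpt itself):

                            # prefer a large box near the image center,
                            # with extra weight on the centering term
                            index = np.argmax(bounding_box_size -
                                              offset_dist_squared * 2.0)
                            det = det[index, :]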
Example #5
def detect(frame, pnet, rnet, onet, faceTrackers):
    big_image = frame.copy()
    big_height = big_image.shape[0]
    big_width = big_image.shape[1]

    # run detection on a smaller grayscale copy for speed
    small_image = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(small_image, cv2.COLOR_BGR2GRAY)

    small_height = small_image.shape[0]
    small_width = small_image.shape[1]

    minsize = 20  # minimum face size in pixels
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor for the image pyramid
    # MTCNN expects a 3-channel image; replicate the gray channel
    img = to_rgb(gray) if gray.ndim == 2 else gray
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet,
                                                threshold, factor)
    nrof_faces = bounding_boxes.shape[0]
    for face_position in bounding_boxes:
        face_position = face_position.astype(int)

        left_top = get_point(face_position[0], face_position[1], small_width,
                             small_height, big_width, big_height)
        right_bottom = get_point(face_position[2], face_position[3],
                                 small_width, small_height, big_width,
                                 big_height)

        x = left_top[0]
        y = left_top[1]
        w = right_bottom[0] - left_top[0]
        h = right_bottom[1] - left_top[1]

        x_bar = x + 0.5 * w
        y_bar = y + 0.5 * h

        matchedFid = None

        for fid in faceTrackers.keys():
            tracked_position = faceTrackers[fid].get_position()

            t_x = int(tracked_position.left())
            t_y = int(tracked_position.top())
            t_w = int(tracked_position.width())
            t_h = int(tracked_position.height())

            t_x_bar = t_x + 0.5 * t_w
            t_y_bar = t_y + 0.5 * t_h

            # match if each box's center lies inside the other box
            if ((t_x <= x_bar <= t_x + t_w) and
                    (t_y <= y_bar <= t_y + t_h) and
                    (x <= t_x_bar <= x + w) and
                    (y <= t_y_bar <= y + h)):
                matchedFid = fid
        if matchedFid is None:
            print("Creating new tracker")

            tracker = dlib.correlation_tracker()
            # pad the detection by 20 px so the tracker has some context
            tracker.start_track(
                big_image,
                dlib.rectangle(x - 20, y - 20, x + w + 20, y + h + 20))

            # naive id assignment; keys collide if trackers are ever removed
            faceTrackers[len(faceTrackers)] = tracker
    return (faceTrackers, big_image)
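
get_point() is called but never defined in this snippet. Given how it is used, a plausible implementation is a linear rescale from the 450-pixel-wide detection image back to full-frame coordinates (an assumption, not the snippet's actual helper):

def get_point(x, y, small_width, small_height, big_width, big_height):
    # map a coordinate from the resized detection image to the full frame
    return (int(x * big_width / small_width),
            int(y * big_height / small_height))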
Example #6
    def identify(self, frame):

        # Force frame to have RGB channels
        if frame.ndim == 2:
            frame = facenet.to_rgb(frame)
        frame = frame[:, :, 0:3]

        # Get the bounding boxes
        bounding_boxes, _ = detect_face.detect_face(frame, self.minsize,
                                                    self.pnet, self.rnet,
                                                    self.onet, self.threshold,
                                                    self.factor)

        # Get the number of faces detected
        nrof_faces = bounding_boxes.shape[0]

        if nrof_faces > 1:
            print('[AUTH_ERROR] Multiple equidistant faces detected!')
            print('[AUTH_ERROR] Please maintain one-by-one queue!')
            return 1
        elif nrof_faces == 1:
            bb = np.zeros((nrof_faces, 4), dtype=np.int32)
            bb[0][0] = bounding_boxes[0][0]
            bb[0][1] = bounding_boxes[0][1]
            bb[0][2] = bounding_boxes[0][2]
            bb[0][3] = bounding_boxes[0][3]

            # Get the frame size
            img_size = np.asarray(frame.shape)[0:2]

            # For storing cropped, scaled and scaled+reshaped image
            cropped = None
            scaled = None
            scaled_reshape = None

            # Create Embedding array
            emb_array = np.zeros((1, self.embedding_size))

            # Bounding box out of frame size range exception
            if (bb[0][0] <= 0 or bb[0][1] <= 0
                    or bb[0][2] >= frame.shape[1]    # x2 beyond frame width
                    or bb[0][3] >= frame.shape[0]):  # y2 beyond frame height
                print('[ERROR] Bounding Box out of frame size range!')
                return 2

            try:
                cropped = frame[bb[0][1]:bb[0][3], bb[0][0]:bb[0][2], :]
                cropped = facenet.flip(cropped, False)

                scaled = np.array(
                    Image.fromarray(cropped).resize(
                        (self.image_size, self.image_size),
                        resample=Image.BILINEAR))
                scaled = cv2.resize(
                    scaled, (self.input_image_size, self.input_image_size),
                    interpolation=cv2.INTER_CUBIC)
                scaled = facenet.prewhiten(scaled)

                scaled_reshape = scaled.reshape(-1, self.input_image_size,
                                                self.input_image_size, 3)

                feed_dict = {
                    self.images_placeholder: scaled_reshape,
                    self.phase_train_placeholder: False
                }

                emb_array[0, :] = self.sess.run(self.embeddings,
                                                feed_dict=feed_dict)

                predictions = self.model.predict_proba(emb_array)
                # print('predictions:', predictions)

                best_class_indices = np.argmax(predictions, axis=1)
                # print('best_class_indices:', best_class_indices)

                best_class_probabilities = predictions[
                    np.arange(len(best_class_indices)), best_class_indices]
                # print('best_class_probabilities:', best_class_probabilities)

            except Exception as e:
                print('[ERROR]', e)
                return 3

            if best_class_probabilities[0] > 0.7:
                return (self.faceIds[best_class_indices[0]], bb,
                        best_class_probabilities[0])
            else:
                print('[AUTH_ERROR] ACCESS DENIED!')
                return 4

        else:
            print('[INFO] No detected face in the threshold vicinity!')
            return 5
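
identify() mixes return types: a (face_id, bounding_box, probability) tuple on success and the integer codes 1-5 for the failure branches, so a caller has to branch on the type. A hypothetical caller (the recognizer instance and the capture loop are assumptions):

import cv2

# recognizer: an instance of the class that defines identify() (assumed)
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # the detector and embedding network expect RGB input
    result = recognizer.identify(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if isinstance(result, tuple):
        face_id, bb, prob = result
        print('Recognized %s (p=%.2f)' % (face_id, prob))
    # the integer codes 1-5 match the error messages printed above
cap.release()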
Example #7
def mtcnn_simplify(image_path, image_size, margin, gpu_memory_fraction,
                   detect_multiple_faces):

    sleep(random.random())  # stagger start-up when several processes run in parallel

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum face size in pixels
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor for the image pyramid

    try:
        img = misc.imread(image_path)
    except (IOError, ValueError, IndexError) as e:
        errorMessage = '{}: {}'.format(image_path, e)
        print(errorMessage)
    else:
        if img.ndim < 2:
            print('Unable to align "%s"' % image_path)
            os._exit(1)  # os._exit requires an exit status
        if img.ndim == 2:
            img = function.to_rgb(img)
        img = img[:, :, 0:3]

        bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet,
                                                    onet, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        # TODO (original note): change this path -- the file records images that could not be cropped
        with open('E:/Desktop/unableface.txt', 'w') as text:
            if nrof_faces > 0:
                det = bounding_boxes[:, 0:4]
                det_arr = []
                img_size = np.asarray(img.shape)[0:2]

                det_arr.append(np.squeeze(det))

                for i, det in enumerate(det_arr):
                    det = np.squeeze(det)
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    # img_mtcnn is the detected, aligned, and cropped result
                    img_mtcnn = misc.imresize(cropped,
                                              (image_size, image_size),
                                              interp='bilinear')
                    # TODO (original note): change this to the desired output directory
                    image_new_name = 'E:' + image_path.split(':')[-1].split(
                        '.')[0] + '.jpg'
                    image_new_path = os.path.dirname(image_new_name)
                    mkdir(image_new_path)
                    cv2.imwrite(image_new_name, img_mtcnn)
                    # print(image_new_name)
            else:
                print('unable to align"%s"' % image_path)
                text.write('%s\n' % (image_path))
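
mkdir() is called above but not defined in the snippet; a minimal version consistent with its use (create the directory tree if missing) might be:

import os

def mkdir(path):
    # hypothetical helper: create the output directory tree if absent
    os.makedirs(path, exist_ok=True)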
Example #8
def monitor(refreshTime):
    global count
    # 1. Recognise the person using the computer (multi-person support to be added later)
    user_id, seconds = detect_face()

    if user_id is None:  # no one in front of the computer --> lock or standby
        count += 1
        if count > 3:
            os.popen('gnome-screensaver-command --lock')
            count = 0

        displayText = "No user found"

    elif user_id == -1:  # an anonymous user is in front of the computer --> block keyboard and mouse input
        displayText = "Anonymous user"

    else:  # a known user was detected --> restore input and update per-application screen time
        count = 0
        # get open apps
        open_apps = get_win_info()

        displayText, user, apps, isUnderAge, responseStatus = currentUserDetails(user_id)
        
        # verify open apps
        if responseStatus == 200:

            print("Detected " + user)

            for app in apps:
                for open_app in open_apps:

                    pid, process_name, title = open_app

                    if app['title'].lower() == process_name:
                        print("Observing " + title)
                        print("Time spent on " + app['title'] + " is " + str(app['time_today']) + ' seconds')
                        print("****************************************************************************")
                        increment(app, seconds + refreshTime)
                        

                        profanity = check_profanity(title)
                        if profanity and isUnderAge:  # age-restricted application used by an underage child at home
                            notif('AGE RESTRICTED CONTENT!!!', f'{profanity}. Your action will be reported!')
                            set_warning(app, profanity)
                            toggle_process(pid, 2)
                            break
                        
                        # time limit warning
                        remaining_time = app['time_limit'] - app['time_today']
                        print(remaining_time)
                        if 2 * refreshTime < remaining_time < 3 * refreshTime:
                            message = 'The time limit of ' + app['title'] + ' will be reached after ' + str(remaining_time) + ' seconds.'
                            notif('Session limit warning', message)

                        # time limit ended
                        if remaining_time <= 0:
                            message = 'The time limit of ' + app['title'] + ' has been reached.'
                            notif('Time limit ended', message)
                            toggle_process(pid, 1)
                        else:
                            toggle_process(pid, 0)

    return displayText
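
monitor() is written to be polled; a minimal driver loop, with the refresh interval as an assumed value, could look like this:

import time

REFRESH_TIME = 5  # seconds between polls (assumed value)

if __name__ == '__main__':
    count = 0  # module-level counter consumed by monitor() via `global count`
    while True:
        print(monitor(REFRESH_TIME))
        time.sleep(REFRESH_TIME)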