Code Example #1
File: id_data.py Project: cupid725/face-recognition
import os

import numpy as np
from scipy import misc  # scipy.misc.imread was removed in SciPy 1.2; imageio.imread is the modern replacement

import detect_and_align


def align_id_dataset(id_dataset, pnet, rnet, onet):
    aligned_images = []

    for id_entry in id_dataset:
        image = misc.imread(os.path.expanduser(id_entry.image_path), mode='RGB')
        face_patches, _, _ = detect_and_align.align_image(image, pnet, rnet, onet)
        aligned_images.extend(face_patches)

    aligned_images = np.stack(aligned_images)
    return aligned_images
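
A minimal driver sketch for the function above: the MTCNN setup mirrors the other examples on this page, and id_dataset is assumed to be the list produced by the project's id_data module.

import tensorflow as tf

import detect_and_align

# Hypothetical driver; `id_dataset` is assumed to be a list whose entries
# expose an image_path attribute, as align_id_dataset above expects.
with tf.Graph().as_default():
    with tf.Session() as sess:
        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
        aligned = align_id_dataset(id_dataset, pnet, rnet, onet)
        print(aligned.shape)  # (num_face_patches, height, width, 3)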
Code Example #2
import cv2
import numpy as np
import tensorflow as tf

import detect_and_align
import id_data

# load_model, print_id_dataset_table, test_run and find_matching_id are
# helper functions defined elsewhere in the project.


def main(image_path, ready=False):
    id_folder = ['ids']
    test_folder = None
    model = './model/'

    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(id_folder[0], pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder, phase_train_placeholder, embeddings, id_dataset, test_folder)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False

            # image_path is either a path on disk (ready=False) or an
            # already-decoded frame (ready=True)
            if ready:
                frame = image_path
            else:
                frame = cv2.imread(image_path)

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

            if len(face_patches) > 0:
                face_patches = np.stack(face_patches)
                feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                embs = sess.run(embeddings, feed_dict=feed_dict)

                print('Matches in frame:')
                for i in range(len(embs)):
                    bb = padded_bounding_boxes[i]

                    matching_id, dist = find_matching_id(id_dataset, embs[i, :])
                    if matching_id:
                        print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                    else:
                        matching_id = 'Unknown'
                        print('Unknown! Couldn\'t find match.')
                    # Note: returns after the first detected face.
                    return matching_id
            else:
                matching_id = 'Face Unfound'
                print(matching_id)
                return matching_id
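
Two hypothetical ways to call this main(): with a path on disk, or with a frame that is already decoded (the file name below is a placeholder).

import cv2

matching_id = main('test/alice.jpg')        # ready=False: read from disk

frame = cv2.imread('test/alice.jpg')
matching_id = main(frame, ready=True)       # pass an already-decoded frame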
Code Example #3
    def process_one(self):
        if not self.to_process:
            return

        # The input is a base64-encoded ASCII string.
        input_str = self.to_process.pop(0)

        # Convert it to a PIL image.
        input_img = base64_to_pil_image(input_str)

        ################## where the hard work is done ############
        # output_img is a PIL image
        # output_img = input_img  # self.makeup_artist.apply_makeup(input_img)

        # output_str is a base64 string in ASCII
        # output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ASCII to a base64 string in _bytes_
        # self.to_output.append(binascii.a2b_base64(output_str))

        open_cv_image = np.array(input_img)
        # PIL gives RGB; OpenCV expects BGR, so reverse the channel axis.
        open_cv_image = open_cv_image[:, :, ::-1].copy()

        print("Processing frame...")

        face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
            open_cv_image, self.pnet, self.rnet, self.onet)
        matching_id = "Unknown"
        if len(face_patches) > 0:
            face_patches = np.stack(face_patches)
            feed_dict = {
                self.images_placeholder: face_patches,
                self.phase_train_placeholder: False
            }
            embs = self.sess.run(self.embeddings, feed_dict=feed_dict)

            print('Matches in frame:')
            for i in range(len(embs)):
                bb = padded_bounding_boxes[i]

                matching_id, dist = self.find_matching_id(embs[i, :])
                if matching_id:
                    print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                else:
                    matching_id = 'Unknown'
                    print('Unknown! Couldn\'t find match.')

                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(open_cv_image, matching_id, (bb[0], bb[3]), font,
                            1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.rectangle(open_cv_image, (bb[0], bb[1]), (bb[2], bb[3]),
                              (255, 0, 0), 2)
        else:
            print("No face patches")

        match_dict = {}
        match_dict[matching_id] = open_cv_image
        # adding matching_name=>frame to array
        self.to_output.append(match_dict)
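
This example relies on a base64_to_pil_image helper that is not shown. One plausible implementation, under the assumption that the input may carry a data-URL prefix; the real project code may differ.

import base64
from io import BytesIO

from PIL import Image


def base64_to_pil_image(base64_str):
    # Strip a data-URL prefix such as "data:image/jpeg;base64," if present.
    if ',' in base64_str:
        base64_str = base64_str.split(',', 1)[1]
    image_bytes = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_bytes)).convert('RGB')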
Code Example #4
import os
import time

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile

import detect_and_align

# LOG, model_path, cam_cap, ticks, face_box, face_found, the show_* flags and
# the *_SLEEP_TICKS constants are module-level globals defined elsewhere.


def run():
    with tf.Session() as sess:

        LOG.log("Loading modell","SYSTEM")
        #temp test
        global pnet
        global rnet
        global onet
        global images_placeholder
        global embeddings
        global phase_train_placeholder
        
        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
        
        model_exp = os.path.expanduser(model_path)
        if os.path.isfile(model_exp):
            # print('Model filename: %s' % model_exp)
            with gfile.FastGFile(model_exp, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
            
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")            
    
    
        # set up tensorflow model
        #load_model(model_path)

        LOG.log("Start system","SYSTEM")
        while True:
         
            if cam_cap.isOpened():
                start = time.time()
                # Get current frame
                global frame
                ret, frame = cam_cap.read()


                # Count tick
                global ticks
                ticks = ticks + 1

                # Do detection
                global DETECTION_SLEEP_TICKS
                if DETECTION_SLEEP_TICKS <= ticks:

                 #print("Detection")
                    global face_box
                    global face_found
                    
                    global show_id
                    global show_bb
                    global show_landmarks
    
                    # Do detection
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

                    # if found faces
                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
       
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        # print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]
            
                            if show_bb:
                                cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)

                            if show_landmarks:
                                for j in range(5):
                                    size = 1
                                    top_left = (int(landmarks[i, j]) - size, int(landmarks[i, j + 5]) - size)
                                    bottom_right = (int(landmarks[i, j]) + size, int(landmarks[i, j + 5]) + size)
                                    cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
        
                        # Convert the TensorFlow box to an OpenCV (x, y, w, h) box
                        face_box = convert_tensorflow_box_to_openCV_box(padded_bounding_boxes[0])
                        # print(face_box)
                        
                        # if running custom tracker this is needed
                        update_custom_tracker()

                        face_found = True

                    else:
                        # No face found in this frame
                        face_found = False

                    if face_found:
                        ticks = 0
                        global FAST_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = FAST_DETECTION_SLEEP_TICKS
                    else:
                        # Make less detections if not
                        ticks = 0
                        global SLOW_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = SLOW_DETECTION_SLEEP_TICKS 
                else:
                    # Do tracking
                    if face_found:
                        object_custom_tracking()

                # print fps
                end = time.time()

                seconds = end - start
                # Guard against a zero-duration frame so fps is always bound.
                fps = round(1 / seconds, 2) if seconds != 0 else 0.0

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, 100), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                
                # Show cam feed
                cv2.imshow('Detection GUI', frame)

                # Close-program functionality: press 'q' to quit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cam_cap.release()
                    cv2.destroyAllWindows()
                    break

                time.sleep(0.2)  # throttle the loop
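
Example #4 also leans on a convert_tensorflow_box_to_openCV_box helper that is not shown. A minimal sketch, assuming the MTCNN box arrives as [x1, y1, x2, y2] and the custom tracker expects an OpenCV-style (x, y, w, h) tuple:

def convert_tensorflow_box_to_openCV_box(box):
    # Assumed input: [x1, y1, x2, y2] corner coordinates.
    x1, y1, x2, y2 = (int(v) for v in box[:4])
    # OpenCV trackers take (x, y, width, height).
    return (x1, y1, x2 - x1, y2 - y1)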
Code Example #5
import datetime
import errno
import os
import time

import cv2
import numpy as np
import pandas as pd
import tensorflow as tf

import detect_and_align
import id_data

# load_model, print_id_dataset_table, test_run, find_matching_id and
# show_missing are helper functions defined elsewhere in the project.


def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            faceCascade = cv2.CascadeClassifier(
                'classifier/haarcascade_frontalface_default.xml')
            file = os.path.join(os.getcwd(), "log.csv")
            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(args.id_folder[0], pnet, rnet,
                                             onet, sess, embeddings,
                                             images_placeholder,
                                             phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder,
                     phase_train_placeholder, embeddings, id_dataset,
                     args.test_folder)

            cap = cv2.VideoCapture(0)
            # Request a capture resolution; the driver typically clamps
            # unsupported values to the nearest supported mode.
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 9600)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 12800)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            int_frame_height = int(frame_height)

            # Append new entries to the evacuation log (hard-coded path)
            f = open(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv",
                'a')
            df = pd.read_csv(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv"
            )
            count = 0
            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            total_people = 10
            matched_ids = []
            ids = os.listdir('ids')
            while True:
                start = time.time()
                _, frames = cap.read()

                gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
                count_msg = "Evacuated staff: " + str(count)
                emergency_msg = "Emergency Evacuation"
                remaining_msg = "Remaining staff: " + str(total_people - count)
                cv2.putText(frames, emergency_msg, (0, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3,
                            cv2.LINE_AA)

                faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.05,
                    minNeighbors=8,
                    minSize=(55, 55),
                    flags=cv2.CASCADE_SCALE_IMAGE)
                imgs = []
                if len(ids) < 5:
                    show_missing(frames, ids, 0, int_frame_height)

                for (x, y, w, h) in faces:
                    # Crop with padding around the Haar detection; note that a
                    # negative start index wraps in NumPy, so faces near the
                    # frame border can produce bad crops.
                    crop_img = frames[y - 50:y + h + 25, x - 25:x + w + 25]
                    imgs.append(crop_img)

                for frame in imgs:
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                        frame, pnet, rnet, onet)
                    # Reloading the full ID dataset on every crop is expensive,
                    # but it picks up identities added to the folder at runtime.
                    id_dataset = id_data.get_id_data(args.id_folder[0], pnet,
                                                     rnet, onet, sess,
                                                     embeddings,
                                                     images_placeholder,
                                                     phase_train_placeholder)

                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {
                            images_placeholder: face_patches,
                            phase_train_placeholder: False
                        }
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]

                            matching_id, dist = find_matching_id(
                                id_dataset, embs[i, :])
                            if matching_id:
                                print('Hi %s! Distance: %1.4f' %
                                      (matching_id, dist))

                                if matching_id not in matched_ids:
                                    matched_ids.append(matching_id)
                                    count = count + 1
                                    count_msg = "Evacuated staff: " + str(
                                        count)
                                    remaining_msg = "Remaining staff: " + str(
                                        total_people - count)
                                    if matching_id in ids:
                                        ids.remove(matching_id)

                            else:
                                matching_id = 'Unknown'
                                print('Unknown! Couldn\'t find match.')
                                '''Make folder and store the image'''
                                try:
                                    dir1 = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                        count)
                                    os.makedirs(dir1)
                                except OSError as e:
                                    if e.errno != errno.EEXIST:
                                        raise


                                path_img = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                    count) + "\\"
                                img_name = path_img + str(count) + '.png'
                                cv2.imwrite(img_name, frame)
                                entry_time = datetime.datetime.fromtimestamp(
                                    start).strftime('%c')
                                df_tmp = pd.DataFrame(
                                    [[str(count), entry_time, "Still_Inside"]],
                                    columns=df.columns)
                                count = count + 1
                                count_msg = "Evacuated staff: " + str(count)
                                remaining_msg = "Remaining staff: " + str(
                                    total_people - count)
                                # DataFrame.append was removed in pandas 2.0;
                                # pd.concat([df, df_tmp]) is the modern equivalent.
                                df = df.append(df_tmp)
                                df.to_csv(f, header=False, index=False)

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)

                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmarks[i, j]) - size,
                                            int(landmarks[i, j + 5]) - size)
                                bottom_right = (int(landmarks[i, j]) + size,
                                                int(landmarks[i, j + 5]) +
                                                size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                    else:
                        print('Couldn\'t find a face')
                        if len(ids) < 5:
                            show_missing(frames, ids, 0, int_frame_height)

                cv2.putText(frames, remaining_msg, (0, int_frame_height - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frames, count_msg, (0, 145),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2,
                            cv2.LINE_AA)

                end = time.time()

                seconds = end - start
                # Guard against a zero-duration frame so fps is always defined.
                fps = round(1 / seconds, 2) if seconds != 0 else 0.0

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frames, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frames)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    f.close()
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
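
All five examples resolve identities through find_matching_id (examples #2 and #5 call the module-level form). A minimal sketch of how it might work, assuming each id_dataset entry carries .name and .embedding attributes and using a Euclidean-distance threshold; the project's actual threshold is not shown on this page.

import numpy as np


def find_matching_id(id_dataset, embedding, threshold=1.0):
    # Nearest-neighbour search over the stored embeddings (L2 distance).
    best_name, best_dist = None, float('inf')
    for entry in id_dataset:
        dist = np.linalg.norm(entry.embedding - embedding)
        if dist < best_dist:
            best_name, best_dist = entry.name, dist
    # Reject matches beyond the threshold; 1.0 here is an assumption.
    if best_dist > threshold:
        best_name = None
    return best_name, best_dist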