# Example no. 1 (score: 0)
    def init_tensor_flow(self):
        """Create the TF graph/session, build the MTCNN nets, load the
        embedding model, and populate the ID dataset from ``./ids/``."""
        print("Initializing Tensor-flow ...")
        self.graph = tf.Graph()
        self.sess = tf.Session()

        print("Tensorflow session created ")
        self.pnet, self.rnet, self.onet = detect_and_align.create_mtcnn(
            self.sess, None)
        print("Loading model file...")
        # Load the model
        self.load_model('./model/')
        print("Model file loaded...")

        # Fetch the model's I/O tensors from the (single) default graph.
        graph = tf.get_default_graph()
        self.images_placeholder = graph.get_tensor_by_name("input:0")
        self.embeddings = graph.get_tensor_by_name("embeddings:0")
        self.phase_train_placeholder = graph.get_tensor_by_name(
            "phase_train:0")

        self.id_dataset = id_data.get_id_data(
            './ids/', self.pnet, self.rnet, self.onet, self.sess,
            self.embeddings, self.images_placeholder,
            self.phase_train_placeholder)
        self.print_id_dataset_table()
# Example no. 2 (score: 0)
 def reload_id_set(self):
     """Rebuild the ID dataset from ``./ids/`` and print its table."""
     print("reloading id set")
     self.id_dataset = id_data.get_id_data(
         './ids/', self.pnet, self.rnet, self.onet, self.sess,
         self.embeddings, self.images_placeholder,
         self.phase_train_placeholder)
     self.print_id_dataset_table()
# Example no. 3 (score: 0)
def main(image_path, ready=False):
    """Identify the first face found in an image.

    Parameters
    ----------
    image_path : str or ndarray
        Path to an image file, or an already-decoded frame when ``ready``
        is truthy.
    ready : bool
        When truthy, ``image_path`` is treated as a decoded frame instead
        of a file path.

    Returns
    -------
    str
        The matching id, ``'Unknown'`` when a face was found but not
        matched, or ``'Face Unfound'`` when no face was detected.
    """
    id_folder = ['ids']
    test_folder = None
    model = './model/'

    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(model)
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(id_folder[0], pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder, phase_train_placeholder, embeddings, id_dataset, test_folder)

            # Bug fix: the original `if ready is False ... elif ready is True`
            # left `frame` undefined for any other truthy/falsy value.
            frame = image_path if ready else cv2.imread(image_path)

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

            if len(face_patches) == 0:
                matching_id = 'Face Unfound'
                print(matching_id)
                return matching_id

            face_patches = np.stack(face_patches)
            feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
            embs = sess.run(embeddings, feed_dict=feed_dict)

            print('Matches in frame:')
            # Only the first detected face is reported — the original
            # returned from inside the loop on its first iteration.
            matching_id, dist = find_matching_id(id_dataset, embs[0, :])
            if matching_id:
                print('Hi %s! Distance: %1.4f' % (matching_id, dist))
            else:
                matching_id = 'Unknown'
                # Bug fix: corrected misspelled message.
                print("Unknown! Couldn't find a match.")
            return matching_id
# Example no. 4 (score: 0)
def gen1():
    """Video streaming generator function.

    Yields multipart JPEG chunks suitable for an MJPEG HTTP response,
    annotating each frame with the recognized face ids.
    """
    print("Starting to generate frames!!!")
    with tf.Graph().as_default():
        with tf.Session() as sess:
            print("Initialize tensor")
            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
            print("Loading model file...")
            # Load the model
            load_model('./model/')
            print("Model file loaded...")
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name(
                "phase_train:0")

            id_dataset = id_data.get_id_data('./ids/', pnet, rnet, onet, sess,
                                             embeddings, images_placeholder,
                                             phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            while True:
                # NOTE(review): `camera` is a module-level object — confirm
                # it is initialized before this generator is consumed.
                frame = camera.get_frame()
                print("Processing frame...")

                face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                    frame, pnet, rnet, onet)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    for i in range(len(embs)):
                        bb = padded_bounding_boxes[i]

                        matching_id, dist = find_matching_id(
                            id_dataset, embs[i, :])
                        if matching_id:
                            print('Hi %s! Distance: %1.4f' %
                                  (matching_id, dist))
                        else:
                            # Bug fix: corrected misspelled label/message.
                            matching_id = 'Unknown'
                            print("Unknown! Couldn't find a match.")

                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(frame, matching_id, (bb[0], bb[3]), font,
                                    1, (255, 255, 255), 1, cv2.LINE_AA)

                        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 0, 0), 2)
                else:
                    print("No face patches")

                # Bug fix: ndarray.tostring() was deprecated and removed in
                # NumPy 1.23+; tobytes() is the drop-in replacement.
                encimg = cv2.imencode('.jpg', frame)[1].tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + encimg + b'\r\n')
# Example no. 5 (score: 0)
def main(args):
    """Run live webcam face recognition.

    Expects ``args`` with attributes ``model`` (model directory or .pb
    file), ``id_folder`` (list; first entry is used) and ``test_folder``
    (optional). Opens an OpenCV window; keys: q quit, l landmarks,
    b bounding boxes, i ids, f fps.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(args.id_folder[0], pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder, phase_train_placeholder, embeddings, id_dataset, args.test_folder)

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            while True:
                start = time.time()
                _, frame = cap.read()

                face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    for i in range(len(embs)):
                        bb = padded_bounding_boxes[i]

                        matching_id, dist = find_matching_id(id_dataset, embs[i, :])
                        if matching_id:
                            print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                        else:
                            # Bug fix: corrected misspelled label/message.
                            matching_id = 'Unknown'
                            print("Unknown! Couldn't find a match.")

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)

                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmarks[i, j]) - size, int(landmarks[i, j + 5]) - size)
                                bottom_right = (int(landmarks[i, j]) + size, int(landmarks[i, j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                else:
                    print('Couldn\'t find a face')

                # Bug fix: the original had a dangling `if find_match is True:`
                # with no indented body here — a syntax error that prevented
                # the module from importing at all. The write-only
                # `find_match` flag has been removed along with it.
                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()


def parse_arguments(argv):
    """Parse command-line arguments for the face-recognition demo.

    ``argv`` is the argument list without the program name
    (i.e. ``sys.argv[1:]``). Returns an ``argparse.Namespace`` with
    ``model``, ``id_folder`` and ``test_folder``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'model', type=str,
        help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
    parser.add_argument(
        'id_folder', type=str, nargs='+',
        help='Folder containing ID folders')
    parser.add_argument(
        '--test_folder', type=str, default=None,
        help='Folder containing test images.')
    return parser.parse_args(argv)

# Script entry point: forward the CLI args (minus the program name) to main().
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
# Example no. 6 (score: 0)
def main(args):
    """Webcam-based evacuation roll call.

    Detects faces with a Haar cascade, recognizes them against the ID
    dataset, overlays evacuation counters on the video feed, and appends
    unknown faces to an ``ids`` folder plus a CSV log.

    Expects ``args`` with ``model``, ``id_folder`` (list) and
    ``test_folder`` attributes. Keys: q quit (closes the log file),
    l landmarks, b boxes, i ids, f fps.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Haar cascade gives a coarse face localization; each crop is
            # then passed through MTCNN alignment + the embedding model.
            faceCascade = cv2.CascadeClassifier(
                'classifier/haarcascade_frontalface_default.xml')
            # NOTE(review): `file` is assigned but never used below; the
            # log is opened from a hard-coded absolute path instead.
            file = os.path.join(os.getcwd(), "log.csv")
            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(args.id_folder[0], pnet, rnet,
                                             onet, sess, embeddings,
                                             images_placeholder,
                                             phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder,
                     phase_train_placeholder, embeddings, id_dataset,
                     args.test_folder)

            cap = cv2.VideoCapture(0)
            # NOTE(review): property ids 4/3 are CAP_PROP_FRAME_HEIGHT /
            # CAP_PROP_FRAME_WIDTH; 9600x12800 exceeds typical webcams and
            # will be clamped by the driver — confirm the intended values.
            cap.set(4, 9600)
            cap.set(3, 12800)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            int_frame_height = int(frame_height)

            #            img_path = os.path.join(args.id_folder)
            #            f = open("D:\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv", 'a')
            # NOTE(review): machine-specific absolute path; breaks on any
            # other machine — should come from configuration.
            f = open(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv",
                'a')
            #            df = pd.read_csv("D:\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv")
            df = pd.read_csv(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv"
            )
            #            df = pd.read_csv(f)
            #            print("df", df)
            #            columns = pd.read_csv("D:\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv", nrows = 0)
            #            print('columns', columns)
            # count: people recognized/registered so far; matched_ids keeps
            # each id counted at most once. total_people is a hard-coded
            # headcount for the "remaining" overlay.
            count = 0
            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            total_people = 10
            matched_ids = []
            # ids: folder names still expected to show up (missing staff).
            ids = os.listdir('ids')
            while (True):
                start = time.time()
                _, frames = cap.read()

                gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
                count_msg = "Evacuated staff: " + str(count)
                emergency_msg = "Emergency Evacuation"
                remaining_msg = "Remaining staff: " + str(total_people - count)
                cv2.putText(frames, emergency_msg, (0, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3,
                            cv2.LINE_AA)

                faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.05,
                    minNeighbors=8,
                    minSize=(55, 55),
                    flags=cv2.CASCADE_SCALE_IMAGE)
                imgs = []
                if len(ids) < 5:
                    show_missing(frames, ids, 0, int_frame_height)

                # Crop each Haar detection with a margin; negative indices
                # near the frame edge silently wrap/clip in NumPy slicing.
                for (x, y, w, h) in faces:
                    crop_img = frames[y - 50:y + h + 25, x - 25:x + w + 25]
                    imgs.append(crop_img)

                for frame in imgs:
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                        frame, pnet, rnet, onet)
                    # NOTE(review): reloading the whole ID dataset for every
                    # cropped face on every frame is very expensive; it is
                    # presumably done to pick up ids saved below — confirm.
                    id_dataset = id_data.get_id_data(args.id_folder[0], pnet,
                                                     rnet, onet, sess,
                                                     embeddings,
                                                     images_placeholder,
                                                     phase_train_placeholder)

                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {
                            images_placeholder: face_patches,
                            phase_train_placeholder: False
                        }
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]

                            matching_id, dist = find_matching_id(
                                id_dataset, embs[i, :])
                            if matching_id:
                                print('Hi %s! Distance: %1.4f' %
                                      (matching_id, dist))

                                # Count each recognized id only once.
                                if matching_id not in matched_ids:
                                    matched_ids.append(matching_id)
                                    count = count + 1
                                    count_msg = "Evacuated staff: " + str(
                                        count)
                                    remaining_msg = "Remaining staff: " + str(
                                        total_people - count)
                                    if matching_id in ids:
                                        ids.remove(matching_id)

                            else:
                                matching_id = 'Unkown'
                                print('Unkown! Couldn\'t fint match.')
                                '''Make folder and store the image'''
                                # Register the unknown face: create a new id
                                # folder named after the running count.
                                try:
                                    #                                os.makedirs(os.path.join(args.id_folder, str(count)))
                                    #                                    dir1 = "D:\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(count)
                                    # NOTE(review): hard-coded absolute path.
                                    dir1 = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                        count)
                                    os.makedirs(dir1)

                                except OSError as e:
                                    # Ignore "already exists"; re-raise others.
                                    if e.errno != errno.EEXIST:
                                        raise


#                                path_img = "D:\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(count) + "\\"
                                path_img = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                    count) + "\\"
                                img_name = path_img + str(count) + '.png'
                                cv2.imwrite(img_name, frame)
                                #                            start = time.time()
                                entry_time = datetime.datetime.fromtimestamp(
                                    start).strftime('%c')
                                df_tmp = pd.DataFrame(
                                    [[str(count), entry_time, "Still_Inside"]],
                                    columns=df.columns)
                                count = count + 1
                                count_msg = "Evacuated staff: " + str(count)
                                remaining_msg = "Remaining staff: " + str(
                                    total_people - count)
                                # NOTE(review): DataFrame.append is removed in
                                # pandas 2.x (use pd.concat); also the full,
                                # growing df is rewritten to the open handle
                                # each time — confirm intended.
                                df = df.append(df_tmp)
                                df.to_csv(f, header=False, index=False)

                        # NOTE(review): these drawing blocks sit OUTSIDE the
                        # embs loop, so bb/matching_id/i hold values from the
                        # LAST iteration only — looks like an indentation bug;
                        # confirm before changing.
                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)

                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmarks[i, j]) - size,
                                            int(landmarks[i, j + 5]) - size)
                                bottom_right = (int(landmarks[i, j]) + size,
                                                int(landmarks[i, j + 5]) +
                                                size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                    else:
                        print('Couldn\'t find a face')
                        if len(ids) < 5:
                            show_missing(frames, ids, 0, int_frame_height)

                cv2.putText(frames, remaining_msg, (0, int_frame_height - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frames, count_msg, (0, 145),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2,
                            cv2.LINE_AA)

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frames, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frames)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    f.close()
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()