def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            count = 0
            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(
                args.id_folder[0],
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                args.threshold,
            )
            test(mtcnn, id_data, args, sess, embeddings, images_placeholder,
                 phase_train_placeholder, count)

def init_tensor_flow(self):
    # Instance method of a wrapper class; `self` is the wrapper instance.
    print("Initializing TensorFlow ...")
    self.graph = tf.Graph()
    self.sess = tf.Session()

    print("TensorFlow session created")
    self.pnet, self.rnet, self.onet = detect_and_align.create_mtcnn(
        self.sess, None)
    print("Loading model file...")
    # Load the model
    self.load_model('./model/')
    print("Model file loaded...")
    self.images_placeholder = tf.get_default_graph().get_tensor_by_name(
        "input:0")
    self.embeddings = tf.get_default_graph().get_tensor_by_name(
        "embeddings:0")
    self.phase_train_placeholder = tf.get_default_graph(
    ).get_tensor_by_name("phase_train:0")

    self.id_dataset = id_data.get_id_data('./ids/', self.pnet, self.rnet,
                                          self.onet, self.sess,
                                          self.embeddings,
                                          self.images_placeholder,
                                          self.phase_train_placeholder)
    self.print_id_dataset_table()
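
The load_model used by these examples loads a frozen FaceNet .pb graph into the default graph, which is where the "input:0", "embeddings:0" and "phase_train:0" tensors come from. A minimal sketch, assuming a single frozen-graph file (Example #16 below does the same gfile-based loading inline; checkpoint directories need the meta-graph path handling shown in Example #11):

import os
import tensorflow as tf
from tensorflow.python.platform import gfile

def load_model(model_path):
    # Read a serialized GraphDef and import it into the default graph,
    # exposing the "input:0", "embeddings:0" and "phase_train:0" tensors.
    model_exp = os.path.expanduser(model_path)
    with gfile.FastGFile(model_exp, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')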
Example #3
def main(image_path, ready=False):
    id_folder = ['ids']
    test_folder = None
    model = './model/'

    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(id_folder[0], pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder, phase_train_placeholder, embeddings, id_dataset, test_folder)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False

            if ready:
                # image_path is already a decoded frame
                frame = image_path
            else:
                frame = cv2.imread(image_path)

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

            if len(face_patches) > 0:
                face_patches = np.stack(face_patches)
                feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                embs = sess.run(embeddings, feed_dict=feed_dict)

                print('Matches in frame:')
                for i in range(len(embs)):
                    bb = padded_bounding_boxes[i]

                    matching_id, dist = find_matching_id(id_dataset, embs[i, :])
                    if matching_id:
                        print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                    else:
                        matching_id = 'Unknown'
                        print('Unknown! Couldn\'t find match.')
                    return matching_id
            else:
                matching_id = 'No face found'
                print(matching_id)
                return matching_id
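
find_matching_id is not defined in this snippet. A plausible sketch, assuming each entry of id_dataset carries an embedding and a name, and that matching is a nearest-neighbor Euclidean-distance search under a threshold (the default value here is illustrative):

import numpy as np

def find_matching_id(id_dataset, emb, threshold=1.0):
    # Return (name, distance) of the closest anchor under the threshold,
    # or (None, distance) if even the closest anchor is too far away.
    matching_id = None
    min_dist = float('inf')
    for anchor in id_dataset:
        dist = np.linalg.norm(emb - anchor.embedding)
        if dist < min_dist:
            min_dist = dist
            matching_id = anchor.name if dist < threshold else None
    return matching_id, min_dist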
Example #4
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            print(embeddings.shape)
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):

    print('Creating networks and loading parameters')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            mtcnn = create_mtcnn(sess, None)

    aligned_images = []
    id_image_paths = []
    id_names = []

    for i in image_paths:
        print(i)
        img = misc.imread(os.path.expanduser(i), mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = detect_face(img, mtcnn['pnet'], mtcnn['rnet'],
                                        mtcnn['onet'])
        if bounding_boxes.shape[0] > 0:
            det = np.squeeze(bounding_boxes[0, 0:4])
            bb = np.zeros(4, dtype=np.int32)
            bb[0] = np.maximum(det[0] - margin / 2, 0)
            bb[1] = np.maximum(det[1] - margin / 2, 0)
            bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
            bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
            cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
            aligned = misc.imresize(cropped, (image_size, image_size),
                                    interp='bilinear')
            prewhitened = prewhiten(aligned)
            aligned_images.append(prewhitened)
            id_image_paths.append(i)
            id_names += [i.split('/')[-2]]
            print("Face recognized: ", id_names[-1])
    images = np.stack(aligned_images)
    return images, np.stack(id_names)
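
prewhiten above is the standard FaceNet preprocessing step: per-image standardization to zero mean and (clamped) unit variance, so the crops match what the network was trained on. For reference:

import numpy as np

def prewhiten(x):
    # Standardize the image; the std is clamped from below so tiny or
    # constant images do not blow up the normalization.
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
    return (x - mean) / std_adj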
Example #6
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(
                args.id_folder[0],
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                args.threshold,
            )
            print(id_data)
            cap = cv2.VideoCapture(
                'sample_fazen_hammad.mp4')  # or 'sample1.mp4', or 0 for a webcam
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            show_landmarks = False
            show_bb = True
            show_id = True
            show_fps = True
            i = 0
            while True:
                start = time.time()
                ret, frame = cap.read()
                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    print("Matches in frame:")
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        if matching_id is None:
                            matching_id = "Unknown"
                            print("Unknown! Couldn't find match.")
                        else:
                            print("Hi %s! Distance: %1.4f" %
                                  (matching_id, dist))
                            # Save the first aligned face patch; patches are
                            # normalized, so scale back to 0-255 for imwrite
                            # (note: i is never incremented, so saves overwrite)
                            face_img = np.array(face_patches)[0, :, :, :]
                            cv2.imwrite(
                                "output1/" + matching_id + str(i) + ".png",
                                face_img * 255)

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print("Couldn't find a face")

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)
                print("fps", fps, "seconds", seconds)
                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow("frame", frame)

                key = cv2.waitKey(1)
                if key == ord("q"):
                    break
                elif key == ord("l"):
                    show_landmarks = not show_landmarks
                elif key == ord("b"):
                    show_bb = not show_bb
                elif key == ord("i"):
                    show_id = not show_id
                elif key == ord("f"):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
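
The IdData class these examples construct is not shown; its find_matching_ids boils down to a nearest-anchor search over the stored embeddings. A minimal sketch of just that method, assuming the class holds an (n_anchors, emb_dim) embedding matrix, a parallel list of names, and the distance threshold passed to the constructor:

import numpy as np

class IdData:
    def __init__(self, anchor_embeddings, id_names, distance_threshold):
        self.embeddings = anchor_embeddings    # (n_anchors, emb_dim)
        self.id_names = id_names               # list of n_anchors names
        self.distance_threshold = distance_threshold

    def find_matching_ids(self, embs):
        # Nearest anchor per query embedding; None when it is too far away.
        matching_ids, matching_distances = [], []
        for emb in embs:
            dists = np.linalg.norm(self.embeddings - emb, axis=1)
            best = int(np.argmin(dists))
            if dists[best] < self.distance_threshold:
                matching_ids.append(self.id_names[best])
            else:
                matching_ids.append(None)
            matching_distances.append(float(dists[best]))
        return matching_ids, matching_distances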
Example #7
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(
                sess, None
            )  # calls create_mtcnn from the detect_and_align module

            load_model(
                args.model
            )  # loads the pre-trained FaceNet model (20170512-110547.pb)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")

            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            #print("embeddings",embeddings)
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             args.threshold)

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = True
            show_bb = True
            show_id = True
            show_fps = False
            while True:
                start = time.time()

                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)
                print("landmarks", landmarks)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        now = datetime.now()
                        if matching_id is None:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')
                        else:
                            print('Hi %s! Distance: %1.4f' %
                                  (matching_id, dist))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame,
                                        matching_id + now.strftime(" %I:%M%p"),
                                        (bb[0], bb[3]), font, 1, (0, 0, 255),
                                        2, cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 1)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print('Couldn\'t find a face')

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frame)

                key = cv2.waitKey(100)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
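
These main(args) examples all expect an argparse namespace with model, id_folder and threshold attributes. A minimal driver, modeled on the parser in Example #12 below (defaults are illustrative only):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str,
                        help='Path to the FaceNet model file or directory')
    parser.add_argument('id_folder', type=str, nargs='+',
                        help='Folder(s) with anchor ID images')
    parser.add_argument('-t', '--threshold', type=float, default=1.0,
                        help='Matching distance threshold')
    main(parser.parse_args())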
Example #8
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(
                args.id_folder[0],
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                args.threshold,
            )

            # OPEN CAMERA AND TAKE A SNAPSHOT
            cam = cv2.VideoCapture(0)
            cv2.namedWindow("test")
            img_counter = 0

            while True:
                ret, frame = cam.read()
                if not ret:
                    break
                cv2.imshow("test", frame)
                k = cv2.waitKey(1)

                if k % 256 == 27:
                    # ESC pressed
                    print("Escape hit, closing...")
                    break
                elif k % 256 == 32:
                    # SPACE pressed
                    img_name = "attendance" + str(date.today()) + ".png"
                    cv2.imwrite(img_name, frame)
                    print("{} written!".format(img_name))
                    img_counter += 1

            cam.release()

            cv2.destroyAllWindows()

            # Now that we have an image, detect and recognise the faces and append the matches to a list
            present = []

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                frame, mtcnn)

            if len(face_patches) > 0:
                face_patches = np.stack(face_patches)
                feed_dict = {
                    images_placeholder: face_patches,
                    phase_train_placeholder: False
                }
                embs = sess.run(embeddings, feed_dict=feed_dict)

                print("Attendance:")
                matching_ids, matching_distances = id_data.find_matching_ids(
                    embs)

                for bb, landmark, matching_id, dist in zip(
                        padded_bounding_boxes, landmarks, matching_ids,
                        matching_distances):
                    if matching_id is None:
                        matching_id = "Unknown"
                        print("Unknown! Couldn't find match.")
                    else:
                        print("%s" % (matching_id)
                              )  #prints all the names present in the image
                        present.append(matching_id)
                        with open('presentstudents.txt', 'w') as filehandle:
                            for listitem in present:
                                filehandle.write('%s\n' % listitem)
            else:
                print("Couldn't find a face")
Example #9
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(
                args.id_folder[0],
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                args.threshold,
            )

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            while True:
                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print("Matches in frame:")
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        if matching_id is None:
                            matching_id = "Unknown"
                            print("Unknown! Couldn't fint match.")
                        else:
                            print("Hi %s! Distance: %1.4f" %
                                  (matching_id, dist))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print("Couldn't find a face")

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow("frame", frame)

                key = cv2.waitKey(1)
                if key == ord("q"):
                    break
                elif key == ord("l"):
                    show_landmarks = not show_landmarks
                elif key == ord("b"):
                    show_bb = not show_bb
                elif key == ord("i"):
                    show_id = not show_id
                elif key == ord("f"):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
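
The FPS figure in these loops is computed from a single frame time, so the overlay jitters. If a steadier readout is wanted, an exponential moving average works (a sketch; the smoothing factor is an arbitrary choice):

class FpsMeter:
    def __init__(self, alpha=0.9):
        self.alpha = alpha      # higher = smoother, slower to react
        self.value = None

    def update(self, seconds):
        # Blend the instantaneous rate into the running average.
        fps = 1.0 / seconds if seconds > 0 else 0.0
        if self.value is None:
            self.value = fps
        else:
            self.value = self.alpha * self.value + (1 - self.alpha) * fps
        return round(self.value, 2)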
Example #10
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            faceCascade = cv2.CascadeClassifier(
                'classifier/haarcascade_frontalface_default.xml')
            file = os.path.join(os.getcwd(), "log.csv")
            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(args.id_folder[0], pnet, rnet,
                                             onet, sess, embeddings,
                                             images_placeholder,
                                             phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder,
                     phase_train_placeholder, embeddings, id_dataset,
                     args.test_folder)

            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 9600)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 12800)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            int_frame_height = int(frame_height)

            f = open(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv",
                'a')
            df = pd.read_csv(
                "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\log.csv"
            )
            count = 0
            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            total_people = 10
            matched_ids = []
            ids = os.listdir('ids')
            while True:
                start = time.time()
                _, frames = cap.read()

                gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
                count_msg = "Evacuated staff: " + str(count)
                emergency_msg = "Emergency Evacuation"
                remaining_msg = "Remaining staff: " + str(total_people - count)
                cv2.putText(frames, emergency_msg, (0, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3,
                            cv2.LINE_AA)

                faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.05,
                    minNeighbors=8,
                    minSize=(55, 55),
                    flags=cv2.CASCADE_SCALE_IMAGE)
                imgs = []
                if len(ids) < 5:
                    show_missing(frames, ids, 0, int_frame_height)

                for (x, y, w, h) in faces:
                    crop_img = frames[y - 50:y + h + 25, x - 25:x + w + 25]
                    imgs.append(crop_img)

                for frame in imgs:
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                        frame, pnet, rnet, onet)
                    id_dataset = id_data.get_id_data(args.id_folder[0], pnet,
                                                     rnet, onet, sess,
                                                     embeddings,
                                                     images_placeholder,
                                                     phase_train_placeholder)

                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {
                            images_placeholder: face_patches,
                            phase_train_placeholder: False
                        }
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]

                            matching_id, dist = find_matching_id(
                                id_dataset, embs[i, :])
                            if matching_id:
                                print('Hi %s! Distance: %1.4f' %
                                      (matching_id, dist))

                                if matching_id not in matched_ids:
                                    matched_ids.append(matching_id)
                                    count = count + 1
                                    count_msg = "Evacuated staff: " + str(
                                        count)
                                    remaining_msg = "Remaining staff: " + str(
                                        total_people - count)
                                    if matching_id in ids:
                                        ids.remove(matching_id)

                            else:
                                matching_id = 'Unknown'
                                print('Unknown! Couldn\'t find match.')
                                # Make a folder and store the image
                                try:
                                    dir1 = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                        count)
                                    os.makedirs(dir1)

                                except OSError as e:
                                    if e.errno != errno.EEXIST:
                                        raise


                                path_img = "C:\\Users\\Siva-Datta.Mannava\\OneDrive - Shell\\Apps\\ml-comm\\smile\\facerecog\\FaceRecognition-master\\ids\\" + str(
                                    count) + "\\"
                                img_name = path_img + str(count) + '.png'
                                cv2.imwrite(img_name, frame)
                                entry_time = datetime.datetime.fromtimestamp(
                                    start).strftime('%c')
                                df_tmp = pd.DataFrame(
                                    [[str(count), entry_time, "Still_Inside"]],
                                    columns=df.columns)
                                count = count + 1
                                count_msg = "Evacuated staff: " + str(count)
                                remaining_msg = "Remaining staff: " + str(
                                    total_people - count)
                                df = df.append(df_tmp)
                                df.to_csv(f, header=False, index=False)

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)

                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmarks[i, j]) - size,
                                            int(landmarks[i, j + 5]) - size)
                                bottom_right = (int(landmarks[i, j]) + size,
                                                int(landmarks[i, j + 5]) +
                                                size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                    else:
                        print('Couldn\'t find a face')
                        if len(ids) < 5:
                            show_missing(frames, ids, 0, int_frame_height)

                cv2.putText(frames, remaining_msg, (0, int_frame_height - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.putText(frames, count_msg, (0, 145),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2,
                            cv2.LINE_AA)

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frames, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frames)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    f.close()
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
Example #11
def get_model_filenames(model_dir):
    # (The start of this helper was missing from the snippet; the lines
    # above max_step are a plausible reconstruction of how it gathers the
    # directory contents and the .meta graph file.)
    files = os.listdir(model_dir)
    meta_files = [s for s in files if s.endswith('.meta')]
    meta_file = meta_files[0]
    max_step = -1
    for f in files:
        step_str = re.match(r'(^model-[\w\- ]+\.ckpt-(\d+))', f)
        if step_str is not None and len(step_str.groups()) >= 2:
            step = int(step_str.groups()[1])
            if step > max_step:
                max_step = step
                ckpt_file = step_str.groups()[0]
    return meta_file, ckpt_file


with tf.Graph().as_default():
    sess = tf.Session()
    #with tf.Session() as sess:

    pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

    model_exp = '/model'
    print('Model directory: %s' % model_exp)
    meta_file, ckpt_file = get_model_filenames(model_exp)

    print('Metagraph file: %s' % meta_file)
    print('Checkpoint file: %s' % ckpt_file)

    saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
    saver.restore(sess, os.path.join(model_exp, ckpt_file))

    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        "phase_train:0")
Example #12
def main(argv):
    modelDir = os.path.join(fileDir, '..', '..', 'models')

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        type=str,
                        help="Path to Facenet pre-trained network model.",
                        default=os.path.join(
                            './models/facenet-1/20170511-185253/',
                            '20170511-185253.pb'))
    parser.add_argument('--imgDim',
                        type=int,
                        help="Default image dimension.",
                        default=96)
    parser.add_argument('--identity',
                        type=str,
                        help="Identity of the person",
                        default='Unknown')

    args = parser.parse_args(argv)

    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(
                sess, args.identity)
            load_model(args.model)

            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            while True:
                start = time.time()
                _, frame = cap.read()

                matching_id, annotatedFrame = processFrame(
                    sess, frame, embeddings, None)

                if matching_id:
                    cv2.imshow('frame', annotatedFrame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
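
processFrame is not included with this example. A hypothetical sketch of what it might do, reusing the detect/embed/annotate pattern from the other examples (it assumes module-level pnet/rnet/onet and placeholder globals, as in Example #16, and an optional id_data in place of the None argument above):

import numpy as np

def processFrame(sess, frame, embeddings, id_data=None):
    # Hypothetical: detect faces, embed them, draw boxes, and return the
    # first match (or None) together with the annotated frame.
    face_patches, bbs, _ = detect_and_align.align_image(
        frame, pnet, rnet, onet)
    if len(face_patches) == 0:
        return None, frame
    feed_dict = {images_placeholder: np.stack(face_patches),
                 phase_train_placeholder: False}
    embs = sess.run(embeddings, feed_dict=feed_dict)
    matching_id = None
    if id_data is not None:
        ids, dists = id_data.find_matching_ids(embs)
        matching_id = ids[0]
    for bb in bbs:
        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
    return matching_id, frame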
Example #13
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(
                args.id_folder[0],
                mtcnn,
                sess,
                embeddings,
                images_placeholder,
                phase_train_placeholder,
                args.threshold,
            )

            ## Create a folder to store images of strangers from the video
            flist = os.listdir('stranger')
            folder_stranger = str(flist[len(flist) - 1])
            if (folder_stranger != '0'):
                folder_stranger = str(int(folder_stranger) + 1)
            else:
                folder_stranger = '1'
            os.mkdir('.\\stranger\\' + folder_stranger)
            if (args.link_video[0] == '0'):
                cap = cv2.VideoCapture(0)
            else:
                cap = cv2.VideoCapture(args.link_video[0])
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            i = 0
            tmp_time = time.time()
            start_time = tmp_time
            while True:
                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print("Matches in frame:")
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    link_img = ''
                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        link_img = ''
                        if matching_id is None:
                            matching_id = "Unknown"
                            print("Unknown! Couldn't find match.")
                            if (args.link_video[0] != '0'):  # video file input
                                link_img = './/stranger//' + folder_stranger + '//stranger_in_' + str(
                                    i / cap.get(cv2.CAP_PROP_FPS)) + '.jpg'
                            else:  # live camera
                                link_img = './/stranger//' + folder_stranger + '//stranger_in_' + str(
                                    round(-start_time + time.time())) + '.jpg'

                        else:
                            print("Hi %s! Distance: %1.4f" %
                                  (matching_id, dist))
                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)

                    if (link_img != ''):
                        if (args.link_video[0] != '0'):
                            if (i % cap.get(cv2.CAP_PROP_FPS) == 0):
                                cv2.imwrite(link_img, frame)
                        else:
                            if (time.time() - tmp_time >= 1):
                                cv2.imwrite(link_img, frame)
                                tmp_time = time.time()
                else:
                    print("Couldn't find a face")
                i += 1
                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                cv2.imshow("frame", frame)

                key = cv2.waitKey(1)
                if key == ord("q"):
                    break
                elif key == ord("l"):
                    show_landmarks = not show_landmarks
                elif key == ord("b"):
                    show_bb = not show_bb
                elif key == ord("i"):
                    show_id = not show_id
                elif key == ord("f"):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()
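
A note on the stranger-folder naming above: taking the last entry of os.listdir relies on lexicographic order, so '10' sorts before '2' and the scheme breaks past nine folders. Picking the next index numerically is more robust (a sketch):

import os

def next_stranger_folder(root='stranger'):
    # Highest existing numeric folder name + 1, or '1' if there are none.
    nums = [int(d) for d in os.listdir(root) if d.isdigit()]
    return str(max(nums) + 1) if nums else '1'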
Example #14
def main(args):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder, float(args.threshold))

            video = 'output1.avi'
            gst_tx2 = "nvarguscamerasrc !video/x-raw(memory:NVMM), width=(int)640, height=(int)360, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink"
            gst_usb = "v4l2src device=/dev/video1 ! video/x-raw, width=(int)320, height=(int)240, format=(string)RGB ! videoconvert ! appsink"
            cap = cv2.VideoCapture(gst_usb, cv2.CAP_GSTREAMER)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False

            present = defaultdict(int)

            while True:
                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        if matching_id is None:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')
                        else:
                            print('Hi %s! Distance: %1.4f' %
                                  (matching_id, dist))
                            present[matching_id] += 1

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print('Couldn\'t find a face')

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('Frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    with open(
                            str(args.id_folder.split('/')[-1]) + '_' +
                            str(datetime.now()) + '.csv', 'w') as f:
                        writer = csv.writer(f)
                        writer.writerow([
                            'Class: ' + str(args.id_folder.split('/')[-1]) +
                            '    ', 'Date and Time: ' + str(datetime.now())
                        ])
                        for i in present.keys():
                            writer.writerow([i])
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            sess.close()
            cv2.destroyAllWindows()
Example #15
def main(args):
    with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:  # tf.Session won't work under TF 2.x

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.compat.v1.get_default_graph(
            ).get_tensor_by_name("input:0")
            embeddings = tf.compat.v1.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.compat.v1.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             args.threshold)

            cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            frame_detections = None
            while True:
                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)
                    frame_detections = {
                        "embs": embs,
                        "bbs": padded_bounding_boxes,
                        "frame": frame.copy()
                    }

                    print("Matches in frame:")
                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        if matching_id is None:
                            matching_id = "Unknown"
                            print("Unknown! Couldn't fint match.")
                        else:
                            # markAttendance(matching_id) #passing name to mark attendance
                            print("Hi %s! Distance: %1.4f" %
                                  (matching_id, dist))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (255, 255, 255), 1,
                                        cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print("Couldn't find a face")

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow("frame", frame)

                key = cv2.waitKey(1)
                if key == ord("q"):
                    break
                elif key == ord("l"):
                    show_landmarks = not show_landmarks
                elif key == ord("b"):
                    show_bb = not show_bb
                elif key == ord("i"):
                    show_id = not show_id
                elif key == ord("f"):
                    show_fps = not show_fps
                elif key == ord("s") and frame_detections is not None:
                    for emb, bb in zip(frame_detections["embs"],
                                       frame_detections["bbs"]):
                        patch = frame_detections["frame"][bb[1]:bb[3],
                                                          bb[0]:bb[2], :]
                        cv2.imshow("frame", patch)
                        cv2.waitKey(1)
                        new_id = easygui.enterbox(
                            "Who's in the image? Leave empty for non-valid")
                        if new_id:  # enterbox returns None on cancel
                            id_data.add_id(emb, new_id, patch)

            cap.release()
            cv2.waitKey(0)
            cv2.destroyAllWindows()
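
id_data.add_id is not shown either. A plausible sketch, assuming IdData keeps a stacked embedding matrix and a parallel name list (as in the matching sketch at the end of Example #6) and persists the face crop so the new ID survives a restart:

import os
import cv2
import numpy as np

def add_id(self, emb, new_id, face_patch, id_folder='ids'):
    # Hypothetical helper: register a new anchor at runtime and save its
    # crop under ids/<new_id>/ for future sessions.
    self.embeddings = np.vstack([self.embeddings, emb[np.newaxis, :]])
    self.id_names.append(new_id)
    out_dir = os.path.join(id_folder, new_id)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    cv2.imwrite(os.path.join(out_dir, '%d.png' % len(self.id_names)),
                face_patch)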
Example #16
def run():
    with tf.Session() as sess:

        LOG.log("Loading modell","SYSTEM")
        #temp test
        global pnet
        global rnet
        global onet
        global images_placeholder
        global embeddings
        global phase_train_placeholder
        
        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
        
        model_exp = os.path.expanduser(model_path)
        if os.path.isfile(model_exp):
            # print('Model filename: %s' % model_exp)
            with gfile.FastGFile(model_exp, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
            
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")            
    
    
        # set up tensorflow model
        #load_model(model_path)

        LOG.log("Start system","SYSTEM")
        while True:
         
            if cam_cap.isOpened():
                start = time.time()
                # Get current frame
                global frame
                ret, frame = cam_cap.read()


                # Count tick
                global ticks
                ticks = ticks + 1

                # Do detection
                global DETECTION_SLEEP_TICKS
                if DETECTION_SLEEP_TICKS <= ticks:

                 #print("Detection")
                    global face_box
                    global face_found
                    
                    global show_id
                    global show_bb
                    global show_landmarks
    
                    # Do detection
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

                    # if found faces
                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
       
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        # print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]
            
                            if show_bb:
                                cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)

                            if show_landmarks:
                                for j in range(5):
                                    size = 1
                                    top_left = (int(landmarks[i, j]) - size, int(landmarks[i, j + 5]) - size)
                                    bottom_right = (int(landmarks[i, j]) + size, int(landmarks[i, j + 5]) + size)
                                    cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
        
                        # Convert box to OpenCV
                        
                        face_box = convert_tensorflow_box_to_openCV_box(padded_bounding_boxes[0])
                        # print(face_box)
                        
                        # if running custom tracker this is needed
                        update_custom_tracker()

                        face_found = True
                        #return True
        

                    else:
                        # No face found in this frame
                        face_found = False
                        # return False

                    # Adapt the detection interval to whether a face was found
                    if face_found:
                        ticks = 0
                        global FAST_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = FAST_DETECTION_SLEEP_TICKS
                    else:
                        # Make less detections if not
                        ticks = 0
                        global SLOW_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = SLOW_DETECTION_SLEEP_TICKS 
                else:
                    # Do tracking
                    if face_found:
                        object_custom_tracking()

                # print fps
                end = time.time()

                seconds = end - start
                if seconds != 0:
                    fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, 100), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                
                # Show camera frame
                cv2.imshow('Detection GUI', frame)

                # Close-program key handling
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cam_cap.release()
                    cv2.destroyAllWindows()
                    break

                time.sleep(0.2) # Sleep
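`convert_tensorflow_box_to_openCV_box` and the custom-tracker helpers used above are not part of this listing. MTCNN reports boxes as (x1, y1, x2, y2) corners, while OpenCV trackers expect (x, y, w, h), so a plausible sketch of the conversion (an assumption, not the author's code) is:

def convert_tensorflow_box_to_openCV_box(box):
    """Turn an (x1, y1, x2, y2) corner box into OpenCV's (x, y, w, h) tuple."""
    x1, y1, x2, y2 = [int(v) for v in box[:4]]
    return (x1, y1, x2 - x1, y2 - y1)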
def main(args):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold)

#            gst_usb ="v4l2src device=/dev/video1 ! video/x-raw, width=(int)320, height=(int)240, format=(string)RGB ! videoconvert ! appsink"
#            cap = cv2.VideoCapture(gst_usb, cv2.CAP_GSTREAMER)
#            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            
            show_landmarks = False
            show_bb = True
            show_id = True
            show_fps = True 
            i = 0                                                                #Count for images
            while(i < 1):                                                        #Recognition stops at last image
                
                start = time.time()
#                _, frame = cap.read()                                           #Frame from video feed is deactivated
                frame = cv2.imread('more_pics/pic{}.png'.format(i))              #Reads the images that are to be scanned by the FR model
                frame_height = frame.shape[0]                                    #Height of the image

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    matching_ids, matching_distances = id_data.find_matching_ids(embs)

                    for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                        if matching_id is None:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')
                        else:
                            print('Hi %s! Distance: %1.4f' % (matching_id, dist))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                else:
                    print('Couldn\'t find a face')

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

#                cv2.imshow('frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps
                cv2.imwrite('more_pics/new{}.png'.format(i), frame)       #Writes the image that was scanned     
                i += 1
#            cap.release()
            cv2.destroyAllWindows()
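`IdData.find_matching_ids` used above is not shown in this listing; conceptually it is a thresholded nearest-neighbour lookup in embedding space. A minimal sketch under that assumption (the array/name layout is hypothetical):

import numpy as np


def find_matching_ids(anchor_embs, anchor_names, embs, threshold):
    """For each query embedding, return (name or None, distance to closest anchor)."""
    matching_ids, matching_distances = [], []
    for emb in embs:
        dists = np.linalg.norm(anchor_embs - emb, axis=1)  # L2 distance to every anchor
        best = int(np.argmin(dists))
        # only accept the match when the closest anchor is within the threshold
        matching_ids.append(anchor_names[best] if dists[best] < threshold else None)
        matching_distances.append(float(dists[best]))
    return matching_ids, matching_distances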
Example #18
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data(args.id_folder[0], pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            test_run(pnet, rnet, onet, sess, images_placeholder, phase_train_placeholder, embeddings, id_dataset, args.test_folder)

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)               # AREA TO BE EDITED

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            find_match = False
            while(True):
                start = time.time()
                _, frame = cap.read()

                face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    
                    print('Matches in frame:')
                    for i in range(len(embs)):
                        bb = padded_bounding_boxes[i]

                        matching_id, dist = find_matching_id(id_dataset, embs[i, :])
                        if matching_id:
                            print('Hi %s! Distance: %1.4f' %(matching_id, dist))
                            find_match = True
                        else:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')
                            find_match = False

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)

                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmarks[i, j]) - size, int(landmarks[i, j + 5]) - size)
                                bottom_right = (int(landmarks[i, j]) + size, int(landmarks[i, j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                
                
                else:
                    print('Couldn\'t find a face')

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()


def parse_arguments(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument('model', type=str, help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
    parser.add_argument('id_folder', type=str, nargs='+', help='Folder containing ID folders')
    parser.add_argument('--test_folder', type=str, help='Folder containing test images.', default=None)
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
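`print_id_dataset_table` is referenced above but not defined in this listing. Assuming each id_dataset entry carries `name` and `image_path` attributes (both assumptions), a small sketch:

def print_id_dataset_table(id_dataset):
    """Print one row per enrolled identity."""
    print('%-20s %s' % ('Name', 'Image'))
    for entry in id_dataset:
        print('%-20s %s' % (entry.name, entry.image_path))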
Example #19
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold)

            cap = cv2.VideoCapture(0)
            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = False
            show_fps = False
            show_message = False
            while(True):
                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    matching_ids, matching_distances = id_data.find_matching_ids(embs)

                    for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                        if matching_id is None:
                            matching_id = 'Unknown'
                            createDefaultMessageONCE()
                        else:
                            createPersonalizedMessageONCE(matching_id)
                            #print('Hi %s! Distance: %1.4f' % (matching_id, dist))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                        

                else:
                    #print('Couldn\'t find a face')
                    noFaceMessageONCE()

                end = time.time()
                seconds = end - start
                fps = round(1 / seconds, 2)

                #PYGAME loop
                if g:
                    for event in pygame.event.get():
                        if event.type == QUIT or (event.type == KEYDOWN):
                            pygame.quit()
                            return

                    g.loop()
                            
                
                if show_fps:
                    print(str(fps))
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                #cv2.imshow('frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps
                elif key == ord('m'):
                    show_message = not show_message

            cap.release()
            cv2.destroyAllWindows()
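The `...MessageONCE` helpers called above are not included in this listing. Their naming suggests they debounce announcements so a greeting fires once per identity change rather than once per frame; a hedged sketch of that idea (all names below are assumptions):

_last_key = object()  # sentinel: nothing announced yet


def _announce_once(key, message):
    """Print `message` only when `key` differs from the previous announcement."""
    global _last_key
    if key != _last_key:
        print(message)
        _last_key = key


def createPersonalizedMessageONCE(matching_id):
    _announce_once(matching_id, 'Hi %s!' % matching_id)


def createDefaultMessageONCE():
    _announce_once('unknown', "Hello! I don't recognize you.")


def noFaceMessageONCE():
    _announce_once('no-face', 'No face in view.')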
Example #20
def gen1():
    """Video streaming generator function."""

    print("Starting to generate frames!!!")
    with tf.Graph().as_default():
        with tf.Session() as sess:
            print("Initialize tensor")
            pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
            print("Loading model file...")
            # Load the model
            load_model('./model/')
            print("Model file loaded...")
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            id_dataset = id_data.get_id_data('./ids/', pnet, rnet, onet, sess,
                                             embeddings, images_placeholder,
                                             phase_train_placeholder)
            print_id_dataset_table(id_dataset)

            while True:
                frame = camera.get_frame()
                print("Processing frame...")

                face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                    frame, pnet, rnet, onet)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    for i in range(len(embs)):
                        bb = padded_bounding_boxes[i]

                        matching_id, dist = find_matching_id(
                            id_dataset, embs[i, :])
                        if matching_id:
                            print('Hi %s! Distance: %1.4f' %
                                  (matching_id, dist))
                        else:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')

                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(frame, matching_id, (bb[0], bb[3]), font,
                                    1, (255, 255, 255), 1, cv2.LINE_AA)

                        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 0, 0), 2)
                else:
                    print("No face patches")

                # cnt = cv2.imencode('.jpeg', frame)[1]
                # b64 = base64.encodebytes(cnt)

                encimg = cv2.imencode('.jpg', frame)[1].tobytes()  # tobytes() replaces the deprecated tostring()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + encimg + b'\r\n')
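The bytes yielded above follow the standard MJPEG-over-HTTP pattern. Assuming the generator is served by a Flask app (Flask does not appear anywhere in this listing, so this is a sketch, not the author's code), the consuming route would look roughly like:

from flask import Flask, Response

app = Flask(__name__)


@app.route('/video_feed')
def video_feed():
    # each multipart chunk replaces the previous JPEG in the browser
    return Response(gen1(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')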
Example #21
def main(args):
    print('################################################################################')
    my_style_data=[]
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)  # build the MTCNN detector from the detect_and_align module

            load_model(args.model)  # load the pre-trained FaceNet 20170512-110547.pb model
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold)
            #url ='rtsp://192.168.137.135:4747/video'

            #cap = cv2.VideoCapture(0)
            connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            connection.settimeout(TIMEOUT_SOCKET)
            connection.connect((IP_SERVER, PORT_SERVER))

            while True:
                try:
                    fileDescriptor = connection.makefile(mode='rb')
                    result = fileDescriptor.readline()
                    fileDescriptor.close()
                    result = base64.b64decode(result)
                    frame = np.frombuffer(result, dtype=np.uint8).copy()  # frombuffer + copy: fromstring is deprecated, and drawing needs a writable array
                    frame_matrix = np.reshape(frame, (IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_PIXEL))
                    cv2.imshow('Window title', frame_matrix)  # imshow returns None; cv2 has no read()
                    cv2.waitKey(1)

                    frame_height = frame_matrix.shape[0]  # a NumPy frame has no .get(); use its shape


                    show_landmarks = True
                    show_bb = True
                    show_id = True
                    show_fps = False
                    show_bb1 = True
                    while(True):
                        start = time.time()
                        v_offset = 50 
                        time.sleep(0.0001)
                        frame = frame_matrix  # the decoded array is already the frame; ndarrays have no .read()
                        frame1 = frame

                        # Locate faces and landmarks in frame
                        face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)

                        if len(face_patches) > 0:
                            face_patches = np.stack(face_patches)
                            feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                            embs = sess.run(embeddings, feed_dict=feed_dict)

                            print('Matches in frame:')
                            matching_ids, matching_distances = id_data.find_matching_ids(embs)

                            for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                                if matching_id is None:
                                    matching_id = 'Unknown'
                                    print('Unknown! Couldn\'t find match.')
                                else:
                                    #if(int(bb[0])<=170 and int(bb[3])<=357 and int(bb[2])<=437 and int(bb[1])<=164):
                                    print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                                    now=datetime.now()
                                    
                                    #csvData = [matching_id, dist,now.strftime("%x %I:%M:%S %p")]
                                              
                                    '''with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Student4.csv', 'a') as csvFile:
                                        writer = csv.writer(csvFile)
                                        writer.writerow(csvData)

                                    csvFile.close()'''

                                    


                                if show_id:                            
                                    font = cv2.FONT_HERSHEY_SIMPLEX
                                    print("bb_Frame",bb)
                                    if(210<int(bb[0])<350 and 150<int(bb[1])<250 and 300<int(bb[2])<460 and 310<int(bb[3])<450):

                                        welcome=" Welcome to Infogen labs"
                                        cv2.putText(frame,matching_id+ welcome, (0, 50), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                        cv2.putText(frame,matching_id+now.strftime(" %I:%M%p"), (bb[0], bb[3]), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                        #csvData = [matching_id, dist,now.strftime("%x  %I:%M:%S %p")]
                                       

                                        def previous_and_next(some_iterable):
                                            prevs, items, nexts = tee(some_iterable, 3)
                                            prevs = chain([None], prevs)
                                            nexts = chain(islice(nexts, 1, None), [None])
                                            return zip(prevs, items, nexts)
                                        csvData = [matching_id, dist,now.strftime("%x %I:%M:%S %p")]
                                        with open('C:/myproject/1Copy_FaceRecognition-master - Copy/Student5.csv', 'a') as csvFile1:
                                            writer = csv.writer(csvFile1)
                                            writer.writerow(csvData)
                                        csvFile1.close()
                                        my_style_data.append(csvData)
                                        print('******************************************************************************************')
                                        print(my_style_data)
                                        print('******************************************************************************************')
                                        if len(my_style_data) >=7:
                                            for prevs, item, nxt in previous_and_next(my_style_data):
                                                try:
                                                    if prevs[0] != item[0] or item[0] != nxt[0]:
                                                        
                                                        with open('C:/myproject/1Copy_FaceRecognition-master - Copy/Employee6.csv', 'a') as csvFile:
                                                            writer = csv.writer(csvFile)
                                                            
                                                            writer.writerow(item)
                                                        csvFile.close()
                                                except TypeError:
                                                    # prevs/nxt is None at the window edges
                                                    pass
                                            del(my_style_data[0:7])




                                        '''if(matching_id=="Akshay"):
                                            with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Akshay.csv', 'a') as csvFile:
                                                writer = csv.writer(csvFile)
                                                writer.writerow(csvData)
                                            csvFile.close()
                                        if(matching_id=="Ajinkya"):
                                            with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Ajinkya.csv', 'a') as csvFile:
                                                writer = csv.writer(csvFile)
                                                writer.writerow(csvData)
                                            csvFile.close()'''



                                if show_bb:
                                    cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 1)
                                if show_bb1:
                                    cv2.rectangle(frame1, (261,174),(457,380), (255,0,255),2)
                                if show_landmarks:
                                    for j in range(5):
                                        size = 1
                                        x=(int(landmark[j]))
                                        top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                        bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                        cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                        else:
                            print('Couldn\'t find a face')

                        

                        

                        cv2.imshow('frame', frame)

                        key = cv2.waitKey(100)
                        if key == ord('q'):
                            break
                        elif key == ord('l'):
                            show_landmarks = not show_landmarks
                        elif key == ord('b'):
                            show_bb = not show_bb
                        elif key == ord('i'):
                            show_id = not show_id
                        elif key == ord('f'):
                            show_fps = not show_fps
                    cv2.destroyAllWindows()  # frame_matrix is an ndarray and has no release()
                except Exception as exc:
                    # bail out if the socket stream fails
                    print('Stream error:', exc)
                    break
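The reader above expects one base64-encoded raw frame per line from the remote camera. A sketch of a matching sender under that assumption (host, port, and frame size are hypothetical parameters, and the width/height must agree with the reader's IMAGE_WIDTH/IMAGE_HEIGHT constants):

import base64
import socket

import cv2


def serve_frames(host, port, width, height):
    """Push webcam frames as newline-terminated base64 lines of raw pixels."""
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (width, height))
        conn.sendall(base64.b64encode(frame.tobytes()) + b'\n')  # one frame per line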
Example #22
def main():
    # with tf.Graph().as_default():
    with tf.Session() as sess:

        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

        #load_model('model/20170512-110547.pb')
        #Load model
        model_exp = os.path.expanduser('model/20170512-110547.pb')
        if os.path.isfile(model_exp):
            print('Model filename: %s' % model_exp)
            with gfile.FastGFile(model_exp, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
        # done loading

        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")

        cap = cv2.VideoCapture(0)
        frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        show_landmarks = True
        show_bb = True
        show_id = False
        show_fps = True

        while (True):
            start = time.time()
            _, frame = cap.read()

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                frame, pnet, rnet, onet)

            if len(face_patches) > 0:
                face_patches = np.stack(face_patches)
                feed_dict = {
                    images_placeholder: face_patches,
                    phase_train_placeholder: False
                }
                embs = sess.run(embeddings, feed_dict=feed_dict)

                print('Matches in frame:')
                for i in range(len(embs)):
                    bb = padded_bounding_boxes[i]

                    if show_id:
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        # no ID dataset is loaded in this example, so there is no
                        # matching_id to draw; label each detection generically
                        cv2.putText(frame, 'face %d' % i, (bb[0], bb[3]), font,
                                    1, (255, 255, 255), 1, cv2.LINE_AA)

                    if show_bb:
                        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 0, 0), 2)

                    if show_landmarks:
                        for j in range(5):
                            size = 1
                            top_left = (int(landmarks[i, j]) - size,
                                        int(landmarks[i, j + 5]) - size)
                            bottom_right = (int(landmarks[i, j]) + size,
                                            int(landmarks[i, j + 5]) + size)
                            cv2.rectangle(frame, top_left, bottom_right,
                                          (255, 0, 255), 2)
            else:
                print('Couldn\'t find a face')

            end = time.time()

            seconds = end - start
            fps = round(1 / seconds, 2)

            if show_fps:
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font,
                            1, (255, 255, 255), 1, cv2.LINE_AA)

            cv2.imshow('frame', frame)

            key = cv2.waitKey(1)
            if key == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
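The `load_model` helper used throughout these examples is not reproduced here. Based on the inline protobuf loader above and the argparse help elsewhere in this listing ("a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file"), a plausible sketch is the following; the checkpoint-directory branch is an assumption about the layout:

import glob
import os

import tensorflow as tf
from tensorflow.python.platform import gfile


def load_model(model):
    """Load a frozen .pb graph, or restore from a checkpoint directory."""
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        # frozen protobuf: identical to the inline loader above
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        # checkpoint directory: import the graph, then restore the weights
        meta_file = glob.glob(os.path.join(model_exp, '*.meta'))[0]
        saver = tf.train.import_meta_graph(meta_file)
        saver.restore(tf.get_default_session(),
                      tf.train.latest_checkpoint(model_exp))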
Example #23
def main(argv):
    # parse inputs
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="Path to the input video.")
    parser.add_argument("id_folder",
                        type=str,
                        nargs="+",
                        help="Folder containing ID folders")
    args = parser.parse_args()

    # initialize NSFW Model
    model = OpenNsfwModel()

    with tf.Graph().as_default():
        with tf.Session() as sess:

            # set variable defaults
            videoFile = args.input_file
            cap = cv2.VideoCapture(videoFile)
            frameRate = cap.get(cv2.CAP_PROP_FPS)  # get the frame rate
            totalFrameCount = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # get the total number of frames
            img_size = 64
            margin = 0.4
            frameNsfw = 0
            isMinor = False
            minorDetected = False

            # set weights and initialize SFW model IsSFW
            with tf.variable_scope('IsSFW'):
                model.build(
                    weights_path="pretrained_models/open_nsfw-weights.npy")
                fn_load_image = create_yahoo_image_loader()
                sess.run(tf.global_variables_initializer())

            # initialize dlib face detector model and set variables
            detector = dlib.get_frontal_face_detector()
            model2 = WideResNet(img_size, 16, 8)()
            model2.load_weights("pretrained_models/weights.29-3.76_utk.hdf5")

            # initialize face identification model
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model("model/20170512-110547.pb")
            threshold = 1.0
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs for face identification model
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             threshold)

            while (cap.isOpened()):
                ret, frame = cap.read()
                frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # get the current frame number
                if not ret:  # exit when there is no video frame left
                    break

                # write video frame to disk and load as an image
                cv2.imwrite('./temp_files/temp.jpg', frame)
                image = fn_load_image('./temp_files/temp.jpg')

                # determine SFW status
                predictions = sess.run(model.predictions,
                                       feed_dict={model.input: image})
                if (predictions[0][1] >= 0.50):
                    frameNsfw = frameNsfw + 1
                    display_lbl = "NSFW"
                    AlertColor = [0, 0, 255]
                else:
                    display_lbl = "SFW"
                    AlertColor = [255, 0, 0]

                # detect faces in dlib face detection model
                image2 = frame
                image2_h, image2_w, _ = np.shape(image2)
                detected = detector(image2, 0)
                faces = np.empty((len(detected), img_size, img_size, 3))
                if len(detected) > 0:  # one or more faces were found in the frame
                    for i, d in enumerate(detected):
                        # extract the coordinates of the face
                        x1, y1, x2, y2, w, h = (d.left(), d.top(), d.right() + 1,
                                                d.bottom() + 1, d.width(), d.height())
                        xw1 = max(int(x1 - margin * w), 0)
                        yw1 = max(int(y1 - margin * h), 0)
                        xw2 = min(int(x2 + margin * w), image2_w - 1)
                        yw2 = min(int(y2 + margin * h), image2_h - 1)
                        # draw a rectangle around the face
                        cv2.rectangle(image2, (x1, y1), (x2, y2), (255, 0, 0),
                                      2)
                        faces[i, :, :, :] = cv2.resize(
                            image2[yw1:yw2 + 1, xw1:xw2 + 1, :],
                            (img_size, img_size))
                        # record the box height in case the label must be drawn near the top of the frame
                        rectangle_height = y2 - y1

                    # predict ages and genders of faces using dlib model
                    results = model2.predict(faces)
                    predicted_genders = results[0]
                    ages = np.arange(0, 101).reshape(101, 1)
                    predicted_ages = results[1].dot(ages).flatten()

                    # draw predictions by faces using dlib model
                    for i, d in enumerate(detected):
                        isMinor = False
                        if predicted_ages[i] < 18:  # detect if a minor is present in the video
                            isMinor = True
                            minorDetected = True
                        label = "{},{},{}".format(
                            int(predicted_ages[i]),
                            "M" if predicted_genders[i][0] < 0.5 else "F",
                            "-MINOR" if isMinor else "")
                        draw_label(image2, (d.left(), d.top()), label,
                                   rectangle_height)

                # Locate faces and landmarks in frame for identification
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)
                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)
                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
                        cv2.putText(frame, matching_id,
                                    (bb[0] + 30, bb[3] + 5), font, 1,
                                    (255, 0, 255), 1, cv2.LINE_AA)

                # display whether frame is SFW or not
                percentageComplete = round((frameId) / (totalFrameCount) * 100)
                display_lbl = display_lbl + " " + str(
                    percentageComplete) + "% fps= " + str(round(frameRate, 2))
                size = cv2.getTextSize(display_lbl, cv2.FONT_HERSHEY_SIMPLEX,
                                       0.4, 1)[0]
                cv2.rectangle(image2, (1, 15 - size[1]), (1 + size[0], 20),
                              AlertColor, cv2.FILLED)
                cv2.putText(image2,
                            display_lbl, (1, 19),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.4, (255, 255, 255),
                            1,
                            lineType=cv2.LINE_AA)

                # display the frame as processed as quickly as possible
                cv2.imshow('frame2', image2)
                cv2.waitKey(1)

            # end of video
            cap.release()
            cv2.destroyAllWindows()
            if os.path.isfile('temp_files/temp.jpg'):
                os.remove("temp_files/temp.jpg")

        # print summary
        if totalFrameCount > 0:
            if (frameNsfw > 0):
                if (minorDetected):
                    print("This video contains minors, and " +
                          str(round((frameNsfw / totalFrameCount * 100), 1)) +
                          "% of the video contains NSFW elements.")
                else:
                    print(
                        str(round((frameNsfw / totalFrameCount * 100), 1)) +
                        "% of the video contains NSFW elements.")
            else:
                print("Video is SFW.")
        else:
            print(
                "No video frames were detected!  Please check the file type or file name."
            )
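`draw_label` is called above with a `rectangle_height` argument so that labels on faces near the top of the frame stay visible, but its body is not in this listing. A sketch consistent with that usage (an assumption, not the author's code):

import cv2


def draw_label(image, point, label, rectangle_height,
               font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, thickness=2):
    """Draw `label` on a filled banner at `point`, dodging the top edge of the frame."""
    size = cv2.getTextSize(label, font, font_scale, thickness)[0]
    x, y = point
    if y - size[1] < 0:
        # box touches the top of the frame: move the banner below the face instead
        y = y + rectangle_height + size[1]
    cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, (x, y), font, font_scale, (255, 255, 255), thickness)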
def main(args):
    # HOST='192.168.1.222'
    # PORT=8485
    # s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # print('Socket created')
    # s.bind((HOST,PORT))
    # print('Socket bind complete')
    # s.listen(10)
    # print('Socket now listening')
    # conn,addr=s.accept()
    # data1 = b""
    # payload_size = struct.calcsize(">L")
    # print("payload_size: {}".format(payload_size))
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)  # build the MTCNN detector from the detect_and_align module

            load_model(args.model)  # load the pre-trained FaceNet 20170512-110547.pb model
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold)

            #cap = cv2.VideoCapture(0)
            #frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = True
            show_bb = True
            show_id = True
            show_fps = False
            show_bb1 = True
            HOST='192.168.1.237'
            PORT=8485
            s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            print('Socket created')
            s.bind((HOST,PORT))
            print('Socket bind complete')
            s.listen(10)
            print('Socket now listening')
            conn,addr=s.accept()
            data1 = b""
            payload_size = struct.calcsize(">L")
            print("payload_size: {}".format(payload_size))
            count=0
            while(True):
                while len(data1) < payload_size:
                    print("Recv: {}".format(len(data1)))
                    data1 += conn.recv(8046)

                print("Done Recv: {}".format(len(data1)))
                packed_msg_size = data1[:payload_size]
                data1 = data1[payload_size:]
                msg_size = struct.unpack(">L", packed_msg_size)[0]
                
                #print("msg_size:{} {}".format(msg_size))
                #count+=1
                while len(data1) < msg_size:
                    data1 += conn.recv(8046)
                frame_data = data1[:msg_size]
                #print(frame_data)
                data1 = data1[msg_size:]

                frame=pickle.loads(frame_data, fix_imports=True, encoding="bytes")
                frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                start = time.time()
                # v_offset = 50 
                # _, frame = data1.read()
                frame1 = frame

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Matches in frame:')
                    matching_ids, matching_distances = id_data.find_matching_ids(embs)

                    for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                        if matching_id is None:
                            matching_id = 'Unknown'
                            print('Unknown! Couldn\'t find match.')
                        else:
                            #if(int(bb[0])<=170 and int(bb[3])<=357 and int(bb[2])<=437 and int(bb[1])<=164):
                            print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                            now=datetime.now()
                            #time1=now.strftime("%I:%M:%S %p")
                            csvData = [matching_id, dist,now.strftime("%x  %I:%M:%S %p")]
                            

                        if show_id:                            
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            print("bb_Frame",bb)
                            if(210<int(bb[0])<350 and 150<int(bb[1])<250 and 300<int(bb[2])<460 and 310<int(bb[3])<450):

                                welcome=" Welcome to Infogen labs"
                                cv2.putText(frame,matching_id+ welcome, (0, 50), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                cv2.putText(frame,matching_id+now.strftime(" %I:%M%p"), (bb[0], bb[3]), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                csvData = [matching_id, dist,now.strftime("%x  %I:%M:%S %p")]
                                with open('E:/clients/Face_Detection_&_RecognitionV2/Server-side-partV2/Student2.csv', 'a') as csvFile:
                                    writer = csv.writer(csvFile)
                                    writer.writerow(csvData)
                                csvFile.close()
                                '''if(matching_id=="Akshay"):
                                    with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Akshay.csv', 'a') as csvFile:
                                        writer = csv.writer(csvFile)
                                        writer.writerow(csvData)
                                    csvFile.close()
                                if(matching_id=="Ajinkya"):
                                    with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Ajinkya.csv', 'a') as csvFile:
                                        writer = csv.writer(csvFile)
                                        writer.writerow(csvData)
                                    csvFile.close()
				'''



                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 1)
                        if show_bb1:
                            cv2.rectangle(frame1, (261,174),(457,380), (255,0,255),2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                x=(int(landmark[j]))
                                top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                else:
                    print('Couldn\'t find a face')

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)  # the show_fps overlay below needs these values

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    frame_height = frame.shape[0]  # no capture object here; take the height from the frame itself
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.imshow('frame', frame)

                key = cv2.waitKey(80)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            #cap.release()
            cv2.destroyAllWindows()
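The receive loop above implements a length-prefixed protocol: a 4-byte big-endian size (`struct.pack('>L', ...)`) followed by a pickled, JPEG-encoded frame. A sketch of the matching client side (host and port echo the constants above, but are assumptions):

import pickle
import socket
import struct

import cv2


def send_frames(host='192.168.1.237', port=8485):
    """Stream webcam frames as length-prefixed pickled JPEG buffers."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        _, encoded = cv2.imencode('.jpg', frame)  # compress before shipping
        data = pickle.dumps(encoded)              # the receiver calls pickle.loads(...)
        client.sendall(struct.pack('>L', len(data)) + data)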
Example #25
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None)

            load_model(args.model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             args.threshold)

            cap = cv2.VideoCapture(0)

            frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            show_landmarks = False
            show_bb = False
            show_id = True
            show_fps = False
            while (True):

                def urljatayu():
                    aran = urllib.parse.quote(nama)
                    pintu = urllib.request.urlopen(
                        urllib.request.Request(
                            'http://jatayu.io/api/data/646ce982-de9b-11e9-8801-e4066d151e57/v1?value=1'
                        ))
                    ket = urllib.request.urlopen(
                        urllib.request.Request(
                            'http://jatayu.io/api/data/646ce982-de9b-11e9-8801-e4066d151e57/v2?value=CAM'
                        ))
                    saha = urllib.request.urlopen(
                        urllib.request.Request(
                            'http://jatayu.io/api/data/646ce982-de9b-11e9-8801-e4066d151e57/v3?value=%s'
                            % aran))
                    print("Membuka pintu")

                start = time.time()
                _, frame = cap.read()

                # Locate faces and landmarks in frame
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)

                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)

                    print('Faces detected:')
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)

                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        if matching_id is None:
                            matching_id = 'Unknown'
                            print('Unknown! No matching face.')
                            cv2.putText(frame, "Access Denied ", (50, 50),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 0, 255), 2, cv2.LINE_AA)

                        else:
                            print('Sampurasun %s! Distance: %1.4f' %
                                  (matching_id, dist))
                            global dikenal
                            dikenal = True

                            cv2.putText(frame, "Akses Diterima ", (50, 50),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2, cv2.LINE_AA)

                            # replace "_" with " " in the ID name
                            namanya = matching_id.split("_")
                            nama = (' '.join(namanya))

                        if show_id:
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(frame, matching_id, (bb[0], bb[3]),
                                        font, 1, (158, 126, 0), 2, cv2.LINE_AA)
                        if show_bb:
                            cv2.rectangle(frame, (bb[0], bb[1]),
                                          (bb[2], bb[3]), (255, 0, 0), 2)
                        if show_landmarks:
                            for j in range(5):
                                size = 1
                                top_left = (int(landmark[j]) - size,
                                            int(landmark[j + 5]) - size)
                                bottom_right = (int(landmark[j]) + size,
                                                int(landmark[j + 5]) + size)
                                cv2.rectangle(frame, top_left, bottom_right,
                                              (255, 0, 255), 2)
                else:
                    print('No face detected')

                    if dikenal:
                        print("sending to jatayu")
                        urljatayu()
                        dikenal = False

                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2)

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, int(frame_height) - 5),
                                font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)  # create the window before drawing into it
                cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
                cv2.imshow('frame', frame)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('l'):
                    show_landmarks = not show_landmarks
                elif key == ord('b'):
                    show_bb = not show_bb
                elif key == ord('i'):
                    show_id = not show_id
                elif key == ord('f'):
                    show_fps = not show_fps

            cap.release()
            cv2.destroyAllWindows()