Example No. 1
def forward_pass(img, session, images_placeholder, phase_train_placeholder, embeddings, image_size):
    # Proceed only when a face crop was actually supplied
    if img is not None:
        # Prewhiten (per-image normalization) and resize to the model's expected input size
        image = load_img(
            img=img, do_random_crop=False, do_random_flip=False,
            do_prewhiten=True, image_size=image_size
        )
        # Run a forward pass through the FaceNet model to compute the embedding
        feed_dict = {images_placeholder: image, phase_train_placeholder: False}
        embedding = session.run(embeddings, feed_dict=feed_dict)
        return embedding

    else:
        return None
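
A possible usage sketch for forward_pass, assuming sess already holds a loaded frozen FaceNet graph (fetched by tensor name, as the later examples do); face_crop and reference_embedding are hypothetical placeholders:

import numpy as np

# Fetch the tensors by name, exactly as the later examples do.
images_placeholder = sess.graph.get_tensor_by_name("input:0")
embeddings = sess.graph.get_tensor_by_name("embeddings:0")
phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")

emb = forward_pass(face_crop, sess, images_placeholder,
                   phase_train_placeholder, embeddings, image_size=160)
if emb is not None:
    # FaceNet embeddings are compared by Euclidean distance: a small
    # distance suggests the two crops show the same person.
    dist = np.linalg.norm(emb - reference_embedding)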
Example No. 2
def recognize_face(sess, pnet, rnet, onet, feature_array):
    # Get input and output tensors
    images_placeholder = sess.graph.get_tensor_by_name("input:0")
    images_placeholder = tf.image.resize_images(images_placeholder, (160, 160))
    # Note: feeding this resized tensor through feed_dict below bypasses the
    # resize op, so the crops passed in must already be image_size x image_size.
    embeddings = sess.graph.get_tensor_by_name("embeddings:0")
    phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")

    image_size = args.image_size
    embedding_size = embeddings.get_shape()[1]

    cap = cv2.VideoCapture(imagePath)

    # The capture's native frame size is system dependent; the float
    # properties are cast to int.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Write annotated frames at the source FPS, upscaled to 1080p.
    out = cv2.VideoWriter(imagePathw,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          fps, (1920, 1080))

    while True:
        ret, gray = cap.read()

        if ret:
            # Resize up front so detection, drawing and writing all use 1080p.
            gray = cv2.resize(gray, (1920, 1080))
            # Detect and align faces with the three MTCNN stages.
            response, faces, bboxs = align_face(gray, pnet, rnet, onet)
            if response:
                for i, image in enumerate(faces):
                    bb = bboxs[i]
                    images = load_img(image, False, False, image_size)

                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    feature_vector = sess.run(embeddings, feed_dict=feed_dict)

                    result, diff = identify_person(feature_vector,
                                                   feature_array, 5)

                    if result.split("/")[0] == 'unknown':
                        # Red box and label (BGR) for an unrecognized face.
                        putTextName(gray, bb,
                                    result.split("/")[0], str(diff),
                                    (0, 0, 255))
                        print("\x1b[1;31;40mUnknown " + "\x1b[0m" +
                              result.split("/")[1])
                    else:
                        # Light cyan box and label (BGR) for a matched face.
                        putTextName(gray, bb,
                                    result.split("/")[0], str(diff),
                                    (255, 255, 127))
                        print("\x1b[1;32;40mMatched " + "\x1b[0m" +
                              result.split("/")[1])

            out.write(gray)
        else:
            break

    cap.release()
    out.release()
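
putTextName is not defined in this example; a minimal reconstruction, with the argument order and label layout assumed, might look like this:

def putTextName(img, bb, name, diff, color):
    # Box around the face, plus filled label bars above and below it.
    cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), color, 2)
    cv2.rectangle(img, (bb[0] - 1, bb[1]), (bb[2] + 1, bb[1] - 18),
                  color, cv2.FILLED)
    cv2.rectangle(img, (bb[0] - 1, bb[3]), (bb[2] + 1, bb[3] + 18),
                  color, cv2.FILLED)
    # Name above the box, match distance below it.
    cv2.putText(img, name, (bb[0], bb[1] - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(img, diff, (bb[0], bb[3] + 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)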
Example No. 3
def recognize_face(sess, pnet, rnet, onet, feature_array):
    # Get input and output tensors
    images_placeholder = sess.graph.get_tensor_by_name("input:0")
    images_placeholder = tf.image.resize_images(images_placeholder, (160, 160))
    embeddings = sess.graph.get_tensor_by_name("embeddings:0")
    phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")

    image_size = args.image_size
    embedding_size = embeddings.get_shape()[1]

    # Read a single still image instead of a video stream.
    # (scipy.misc.imread was removed in SciPy 1.2; imageio.imread is the
    # usual replacement.)
    img = misc.imread(imagePath)
    gray = img
    if gray.size > 0:
        print(gray.size)
        # Detect and align faces with the three MTCNN stages.
        response, faces, bboxs = align_face(gray, pnet, rnet, onet)
        print(response)
        if response:
            for i, image in enumerate(faces):
                bb = bboxs[i]
                images = load_img(image, False, False, image_size)

                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                feature_vector = sess.run(embeddings, feed_dict=feed_dict)

                result, diff = identify_person(feature_vector, feature_array,
                                               5)

                if result.split("/")[0] == 'unknown':
                    # Red box (the image is RGB here) for an unrecognized face.
                    putTextName(gray, bb,
                                result.split("/")[0], str(diff), (255, 0, 0))
                    print("\x1b[1;31;40mUnknown " + "\x1b[0m" +
                          result.split("/")[1])
                else:
                    # Light cyan box for a matched face.
                    putTextName(gray, bb,
                                result.split("/")[0], str(diff),
                                (127, 255, 255))
                    print("\x1b[1;32;40mMatched " + "\x1b[0m" +
                          result.split("/")[1])

            misc.imsave(imagePathw, gray)
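
align_face is likewise not shown; assuming it wraps the MTCNN detector from the davidsandberg/facenet project (whose align.detect_face module provides detect_face), a sketch could be:

import align.detect_face

def align_face(img, pnet, rnet, onet, margin=16):
    # Hypothetical reconstruction: run the three MTCNN stages and crop
    # every detected face. Thresholds follow the facenet defaults.
    bounding_boxes, _ = align.detect_face.detect_face(
        img, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
    if len(bounding_boxes) == 0:
        return False, [], []
    faces, bboxs = [], []
    for box in bounding_boxes:
        # Clip the padded box to the image bounds.
        bb = [int(max(box[0] - margin, 0)), int(max(box[1] - margin, 0)),
              int(min(box[2] + margin, img.shape[1])),
              int(min(box[3] + margin, img.shape[0]))]
        faces.append(img[bb[1]:bb[3], bb[0]:bb[2]])
        bboxs.append(bb)
    return True, faces, bboxs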
Example No. 4
def main(args):

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Get the paths for the corresponding images, one directory per person
            names = ['vinayak', 'karthik', 'Ashish', 'Saurabh', 'Hari']
            paths = [
                'datasets/kar_Vin_aligned/' + name + '/' + f
                for name in names
                for f in os.listdir('datasets/kar_Vin_aligned/' + name)
            ]

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            images_placeholder = tf.image.resize_images(images_placeholder,
                                                        (160, 160))
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")

            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]
            extracted_dict = {}

            # Run forward pass to calculate embeddings
            for i, filename in enumerate(paths):

                images = facenet.load_img(filename, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                feature_vector = sess.run(embeddings, feed_dict=feed_dict)
                extracted_dict[filename] = feature_vector
                if i % 100 == 0:
                    print("completed", i, "images")

            with open('extracted_dict.pickle', 'wb') as f:
                pickle.dump(extracted_dict, f)
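
identify_person, which the other examples call, is not shown either; a minimal nearest-neighbour sketch over the dictionary pickled above, assuming feature_array is that dictionary and the last argument bounds the L2 distance, could be:

import numpy as np

def identify_person(feature_vector, feature_array, threshold):
    # Hypothetical reconstruction: return the path of the closest stored
    # embedding together with its L2 distance, prefixing 'unknown/' when
    # nothing falls within the threshold.
    best_path, best_diff = None, float('inf')
    for path, stored in feature_array.items():
        diff = np.linalg.norm(feature_vector - stored)
        if diff < best_diff:
            best_path, best_diff = path, diff
    if best_diff > threshold:
        return 'unknown/' + best_path, best_diff
    return best_path, best_diff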
Example No. 5
def recognize_face(sess, pnet, rnet, onet, feature_array):
    # Get input and output tensors
    images_placeholder = sess.graph.get_tensor_by_name("input:0")
    images_placeholder = tf.image.resize_images(images_placeholder, (160, 160))
    embeddings = sess.graph.get_tensor_by_name("embeddings:0")
    phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")

    image_size = args.image_size
    embedding_size = embeddings.get_shape()[1]

    # Open the default webcam.
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Note: MTCNN normally expects a 3-channel image, so align_face must
        # cope with the single-channel frame this conversion produces.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Press 'q' to stop capturing and close the window
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break

        if gray.size > 0:
            print(gray.size)
            response, faces, bboxs = align_face(gray, pnet, rnet, onet)
            print(response)
            if response:
                for i, image in enumerate(faces):
                    bb = bboxs[i]
                    images = load_img(image, False, False, image_size)
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    feature_vector = sess.run(embeddings, feed_dict=feed_dict)
                    result, accuracy = identify_person(feature_vector,
                                                       feature_array, 8)
                    print(result.split("\\")[7])
                    print(accuracy)

                    if accuracy < 0.9:
                        cv2.rectangle(gray, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 255, 255), 2)
                        W = int(bb[2] - bb[0]) // 2
                        H = int(bb[3] - bb[1]) // 2
                        cv2.putText(
                            gray,
                            "Hello " + result.split("\\")[7].split('.')[0],
                            (bb[0] + W - (W // 2), bb[1] - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                    else:
                        cv2.rectangle(gray, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 255, 255), 2)
                        W = int(bb[2] - bb[0]) // 2
                        H = int(bb[3] - bb[1]) // 2
                        cv2.putText(gray, "WHO ARE YOU ?",
                                    (bb[0] + W - (W // 2), bb[1] - 7),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 1, cv2.LINE_AA)
                    del feature_vector

            cv2.imshow('img', gray)

    # Release the capture if the stream ended without the 'q' handler firing.
    cap.release()
    cv2.destroyAllWindows()
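
None of the examples show how sess, the MTCNN stages and feature_array are created; assuming the davidsandberg/facenet conventions (create_mtcnn and load_model) and a hypothetical model path, a driver could look like:

import pickle
import tensorflow as tf
import align.detect_face
import facenet

with tf.Graph().as_default():
    with tf.Session() as sess:
        # Build the three MTCNN stage networks in this session.
        pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
        # Load the frozen FaceNet model (the path is an assumption).
        facenet.load_model('models/20180402-114759.pb')
        # Reload the embeddings dictionary written by Example No. 4.
        with open('extracted_dict.pickle', 'rb') as f:
            feature_array = pickle.load(f)
        recognize_face(sess, pnet, rnet, onet, feature_array)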