def testing_run(frame):
    """Detect faces in *frame* and identify each one with the SVC models.

    Returns the first recognised identity (a label string) or "unknown";
    returns None implicitly when no face is found or an error occurs.

    NOTE(review): this file re-defines `testing_run` several times; only
    the last definition is bound after import.
    """
    try:
        img = frame
        # BUGFIX: the original passed the undefined name `image` here,
        # which raised NameError on every call and was silently swallowed
        # by the broad except below.  The CNN detector is more accurate
        # than HOG but much slower without a GPU.
        face_locations = face_recognition.face_locations(
            img, number_of_times_to_upsample=0, model="cnn")

        for face_location in face_locations:
            # face_recognition returns boxes as (top, right, bottom, left).
            top, right, bottom, left = face_location

            # Crop the face region; round-trip through PIL to get a
            # contiguous array.
            face_image = img[top:bottom, left:right]
            pil_image = Image.fromarray(face_image)
            example_image = np.asarray(pil_image)

            # Embedding from the project's own model, classified by `svc`.
            myemd = return_emd(example_image)
            example_prediction = svc.predict([myemd])

            # 128-d dlib encoding, classified by the second model `svc1`.
            # NOTE(review): indexing [0] raises IndexError when dlib finds
            # no face in the crop; the outer except turns that into a
            # printed message.
            fd = face1.face_recognition.face_encodings(example_image)[0]
            example_prediction1 = svc1.predict([fd])

            example_identity = encoder.inverse_transform(example_prediction)[0]
            example_identity1 = encoder1.inverse_transform(
                example_prediction1)[0]

            # Accept only when the encoding is close to BOTH stored
            # reference embeddings (indices 0 and 1) for that identity.
            thresh = 0.25
            if (face1.distance(mic[(example_identity1, 0)], fd) < thresh
                    and face1.distance(mic[(example_identity1, 1)], fd) < thresh):
                print(example_identity1)
                return example_identity1
            else:
                print("unknown")
                return "unknown"
    except Exception as e:
        # Broad catch keeps the caller's capture loop alive on any failure.
        print(e)
def testing_run(frame):
    """Detect faces in *frame* and print the identity recognised by `knn1`.

    Side effects: writes each cropped face to 'sada.jpg' and prints the
    identity, "unknown", or "noface".  Uses the module-level `thresh`
    computed by pre_load().
    """
    global thresh

    if 1:
        img = frame
        face_locations = face_recognition.face_locations(img)
        if len(face_locations) == 0:
            print("noface")
        else:
            for face_location in face_locations:
                # Boxes come back as (top, right, bottom, left).
                top, right, bottom, left = face_location
                face_image = img[top:bottom, left:right]
                # Debug dump of the crop being classified.
                cv2.imwrite('sada.jpg', face_image)
                cv2.waitKey(0)

                example_image = np.asarray(face_image)

                # BUGFIX: face_encodings() returns an empty list when dlib
                # fails to re-detect a face in the crop; indexing [0] then
                # raised IndexError (no enclosing try in this version).
                # Skip such crops instead of crashing.
                encodings = face_recognition.face_encodings(example_image)
                if not encodings:
                    continue
                fd = encodings[0]
                example_prediction1 = knn1.predict([fd])

                example_identity1 = encoder1.inverse_transform(
                    example_prediction1)[0]

                # Accept only when close to both stored reference
                # embeddings (indices 1 and 2) for the predicted identity.
                if (face1.distance(mic[(example_identity1, 1)], fd) < thresh
                        and face1.distance(mic[(example_identity1, 2)], fd) < thresh):
                    print(example_identity1)
                else:
                    print("unknown")
def retrain():
    """Move newly-registered people from the temp store into the gallery.

    Reads pending rows from the `add_person` table, moves their embeddings
    from tempFiles/temp.hdf5 into tempFiles/embedded2.hdf5 and their images
    from temp_image/ into images/<name>/, adopts other pending embeddings
    that match the same person, pads every person to at least three images,
    records them in `user_detail`, and finally re-trains the classifiers
    via pre_load() when anything was added.
    """
    now = datetime.datetime.now()
    global past_time
    if past_time is None:  # first call: initialise the rate-limit timer
        past_time = now
    time_diff = int((now - past_time).total_seconds()) / 60

    # Shell commands differ between Windows and POSIX.
    import platform
    if platform.system() == 'Windows':
        win = 1
    else:
        win = 0

    if 1:  # time_diff > 1:  (rate limit currently disabled)
        global main_c
        past_time = now
        print('New Face adding...')
        main_c.execute("select * from add_person")
        data = main_c.fetchall()
        main_c.execute("truncate table add_person")
        f = h5py.File('tempFiles/temp.hdf5', 'a')
        g = h5py.File('tempFiles/embedded2.hdf5', 'a')
        for d in data:
            # d[1]: temp image/embedding key, d[2]: person's name.
            iemd = f[d[1]].value
            del f[d[1]]
            gg = g.create_group(d[2])
            print("XXX")
            # SECURITY NOTE(review): names are interpolated straight into
            # shell commands — injection-prone if they are user-supplied;
            # prefer os.makedirs/shutil.move.
            if win != 1:
                os.system("mkdir images/" + str(d[2]))
                os.system("mv temp_image/" + str(d[1]) + " images/" + str(d[2]) + "/1.jpg")
                print("sadas")
            else:
                os.system("md images\\" + str(d[2]))
                os.system("move temp_image\\" + str(d[1]) + " images\\" + str(d[2]) + "\\1.jpg")

            no_img = 1
            gg.create_dataset(str(no_img) + ".jpg", data=iemd)

            # Adopt any other pending embeddings that match this person.
            # BUGFIX: iterate over a snapshot of the keys — entries are
            # deleted from `f` inside the loop, and mutating a group while
            # iterating its live key view raises RuntimeError.
            for key in list(f.keys()):
                if face1.distance(f[key].value, iemd) < 0.25:
                    print("in keys")
                    # BUGFIX: execute() expects a parameter sequence;
                    # `(key)` is just `key`, not a one-element tuple.
                    main_c.execute('delete from notification where  img_name=%s', (key,))
                    no_img = no_img + 1
                    if win != 1:
                        os.system("mv temp_image/" + str(key) + " images/" + str(d[2]) + "/" + str(no_img) + ".jpg")
                    else:
                        os.system("move temp_image\\" + str(key) + " images\\" + str(d[2]) + "\\" + str(no_img) + ".jpg")
                    gg.create_dataset(str(no_img) + ".jpg", data=f[key].value)
                    del f[key]

            # Pad to three images per person by duplicating the last one,
            # so the train/test split in pre_load() has enough samples.
            while no_img < 3:
                print(no_img)
                nn = no_img + 1
                if win != 1:
                    os.system("cp images/" + str(d[2]) + "/" + str(no_img) + ".jpg images/" + str(d[2]) + "/" + str(nn) + ".jpg")
                else:
                    os.system("copy images\\" + str(d[2]) + "\\" + str(no_img) + ".jpg images\\" + str(d[2]) + "\\" + str(nn) + ".jpg")
                no_img = no_img + 1
                gg.create_dataset(str(no_img) + ".jpg", data=iemd)

            main_c.execute(
                'insert into user_detail(name,no_img,last_update_date) values(%s,%s,%s)',
                (d[2], no_img, now.strftime("%Y-%m-%d")))
            db.commit()

        f.close()
        g.close()
        print('### complete face adding ###')
        # Re-train the models only when something was actually added.
        if not len(data) == 0:
            pre_load()
def pre_load():
    """(Re)build the recognition models from images/ and embedded2.hdf5.

    Populates the module-level globals:
      - mic:       {(name, image_number): embedding} reference lookup
      - thresh:    distance threshold tuned on the stored embeddings
      - encoder1:  LabelEncoder over person names
      - knn1/svc1: classifiers fitted on the embeddings
    """
    global knn1, svc1, encoder1, mic
    metadata = face1.load_metadata('images')
    num = len(metadata)
    filename = 'tempFiles/embedded2.hdf5'
    f = h5py.File(filename, 'r')

    # Reference embeddings keyed by (person name, image number);
    # dataset names look like "<n>.jpg".
    for raw in f:
        for j in f[raw]:
            xc = j.split('.')
            mic[str(raw), int(xc[0])] = f[raw][j][:]

    # One 128-d embedding row per metadata entry.
    embedded2 = np.zeros((metadata.shape[0], 128))
    for i, m in enumerate(metadata):
        embedded2[i] = f[m.name + "/" + m.file].value

    f.close()

    # Split: the first image of each person goes to the test set,
    # the remaining images to the train set.
    train_in = np.ones((metadata.shape[0]), dtype=bool)
    test_in = np.ones((metadata.shape[0]), dtype=bool)
    name = None
    for i, m in enumerate(metadata):
        if name is None:
            name = m.name
            train_in[i] = False
            test_in[i] = True
            continue
        if name == m.name:
            train_in[i] = True
            test_in[i] = False
        else:
            name = m.name
            train_in[i] = False
            test_in[i] = True

    distances = []  # squared L2 distance between pairs
    identical = []  # 1 if same identity, 0 otherwise

    num = len(metadata)

    # BUGFIX: enumerate each unordered pair exactly once.  The original
    # inner loop `range(1, num)` also produced j <= i, double-counting
    # pairs and adding zero-distance self-pairs labelled identical, which
    # biased the threshold selection below.
    for i in range(num - 1):
        for j in range(i + 1, num):
            distances.append(face1.distance(embedded2[i], embedded2[j]))
            identical.append(1 if metadata[i].name == metadata[j].name else 0)

    distances = np.array(distances)
    identical = np.array(identical)

    thresholds = np.arange(0.2, 0.4, 0.005)

    # Pick the threshold with maximal F1 score, then pad it slightly.
    f1_scores = [f1_score(identical, distances < t) for t in thresholds]
    opt_idx = np.argmax(f1_scores)
    opt_tau = thresholds[opt_idx]
    opt_acc = accuracy_score(identical, distances < opt_tau)
    global thresh
    thresh = opt_tau + 0.02
    print(f'Accuracy at threshold {opt_tau:.3f} = {opt_acc:.3f}')

    #####################################

    targets2 = np.array([m.name for m in metadata])

    encoder1 = LabelEncoder()
    encoder1.fit(targets2)

    y = encoder1.transform(targets2)

    X_train = embedded2[train_in]
    X_test = embedded2[test_in]

    y_train = y[train_in]
    y_test = y[test_in]

    knn1.fit(X_train, y_train)
    svc1.fit(X_train, y_train)

    acc_knn1 = accuracy_score(y_test, knn1.predict(X_test))
    acc_svc1 = accuracy_score(y_test, svc1.predict(X_test))
    print(f'KNN accuracy = {acc_knn1}, SVM accuracy = {acc_svc1}')
def testing_run(frame):
    """Detect, label, and log attendance for every face in *frame*.

    Draws a box and name on the image, shows it, and records recognised
    identities as column updates in the `data` table of the `attendence`
    database.
    """
    img = frame
    face_locations = face_recognition.face_locations(img)
    if len(face_locations) == 0:
        print("noface")
    else:
        # Round-trip through disk so face_recognition reloads the frame
        # in its expected channel order before drawing on it.
        cv2.imwrite("a.jpg", img)
        unknown_image = face_recognition.load_image_file("a.jpg")
        pp = Image.fromarray(unknown_image)
        draw = ImageDraw.Draw(pp)
        a = []  # identities recognised in this frame
        for face_location in face_locations:
            # Boxes come back as (top, right, bottom, left).
            top, right, bottom, left = face_location
            face_image = img[top:bottom, left:right]
            draw.rectangle(((left - 5, top - 5), (right - 5, bottom + 5)),
                           outline=(200, 0, 0))

            example_image = np.asarray(face_image)

            # BUGFIX: face_encodings() returns an empty list when dlib
            # fails to re-detect a face in the crop; indexing [0] then
            # raised IndexError (the original try/except is commented out).
            encodings = face1.face_recognition.face_encodings(example_image)
            if not encodings:
                continue
            fd = encodings[0]
            example_prediction1 = svc1.predict([fd])

            example_identity1 = encoder1.inverse_transform(
                example_prediction1)[0]

            thresh = 0.30
            thresh2 = 0.40  # looser threshold for the disabled "ask" branch

            # Accept only when close to both stored reference embeddings.
            if (face1.distance(mic[(example_identity1, 0)], fd) < thresh
                    and face1.distance(mic[(example_identity1, 1)], fd) < thresh):
                print(example_identity1)
                # NOTE(review): ImageDraw.textsize was removed in Pillow
                # 10; switch to draw.textbbox if Pillow is upgraded.
                text_width, text_height = draw.textsize(str(example_identity1))

                draw.rectangle(((left - 5, bottom - text_height - 10 - 5),
                                (right - 5, bottom + 5)),
                               fill=(200, 0, 0),
                               outline=(200, 0, 0))
                draw.text((left + 6, bottom - text_height - 5),
                          str(example_identity1),
                          fill=(255, 255, 255, 255))
                a.append(example_identity1)

            # elif face1.distance(mic[(example_identity1,0)],fd) < thresh2 and face1.distance(mic[(example_identity1,1)],fd) < thresh2 :
            #     print(example_identity1)
            #     print("2")
            #     for_asking(example_identity1, face_image,fd)
            else:
                print("unknown")
                a.append("unknown")
                text_width, text_height = draw.textsize("Unknown")
                draw.rectangle(
                    ((left, bottom - text_height - 10), (right, bottom)),
                    fill=(0, 0, 255),
                    outline=(0, 0, 255))
                draw.text((left + 6, bottom - text_height - 5),
                          "Unknown",
                          fill=(255, 255, 255, 255))

        del draw
        dx = PyMySQL.connect("localhost", "root", "", "attendence")
        cr = dx.cursor()
        now = datetime.datetime.now()
        # BUGFIX: parameters must be a sequence — note the trailing comma.
        cr.execute("INSERT INTO data(datetim) values(%s)",
                   (now.strftime('%H:%M %Y-%m-%d'),))
        dx.commit()
        for x in a:
            # The identity is used as a COLUMN name so it cannot be bound
            # as a parameter (it comes from our own label encoder); the
            # timestamp VALUE, however, is now bound instead of being
            # string-formatted into the query (BUGFIX: removes an SQL
            # injection point).
            aq = "UPDATE data SET {}=1 where datetim=%s".format(x)
            cr.execute(aq, (now.strftime('%H:%M %Y-%m-%d'),))

        dx.commit()
        cr.close()
        dx.close()
        pp.show()