Example 1
        #####################################selecting primary target face start

        if len(faces) > 0:

            print("Primary Face Found")
            primaryFaceCoordinate = faces[0]
            print(primaryFaceCoordinate)

            [x, y, w, h] = primaryFaceCoordinate

            face = gray[y:y + h, x:x + w]  # cropping the face region
            face = cv2.resize(face, (350, 350))  # resizing the crop to 350x350

            #####################################emotion start
            fisherEmotion = fish.findEmotion(face)  # fisherface

            cv2.imwrite("tensorFace.jpg", face)
            image = cv2.imread("tensorFace.jpg")  # reading back the face written just above
            tensorEmotions, tensorValues = tensor.getEmotions(
                image, sess, finalTensor)
            tensorEmotion = tensorEmotions[0]
            #####################################emotion end

            #####################################comparison start
            if fisherEmotion == "happy":
                finalEmotion = fisherEmotion
            elif fisherEmotion == "surprise":
                finalEmotion = fisherEmotion
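            # the snippet is cut off here; in the fuller listing (Example 2
            # below) the same comparison chain continues:
            elif fisherEmotion in tensorEmotions[:3]:
                finalEmotion = fisherEmotion
            else:
                finalEmotion = tensorEmotions[0]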
Example 2
def fingerAndFacial():
    def gohead():

        third = Toplevel()

        third.title("Music Player")
        third.geometry("500x360+120+120")
        label = Label(third, text='Music Player')
        label.pack()
        Label(third).pack()  # empty label used as a vertical spacer
        label12 = Label(third, text=finalEmotion, font=20)
        label12.pack()

        Label(third).pack()  # three empty labels as vertical spacers
        Label(third).pack()
        Label(third).pack()

        v = StringVar()
        songlabel = Label(third, textvariable=v, width=35)

        index = 0

        def updatelabel():
            # 'index' lives in the enclosing gohead() scope, so 'nonlocal'
            # (not 'global') is what the commented-out code below would need;
            # 'songname' was declared global in the original but never defined
            nonlocal index
            # v.set(realnames[index])
            # v.set("happy")

        def nextsong(event):
            # mapping each detected emotion to a track; note the original code
            # plays fear.ogg when the emotion is "anger"
            emotionTracks = {
                "happy": "happy.ogg",
                "sadness": "sad.ogg",
                "anger": "fear.ogg",
            }
            track = emotionTracks.get(finalEmotion)
            if track:
                pygame.mixer.init()  # initialising the audio mixer (mixer.init() in the original)
                pygame.mixer.music.load(track)
                pygame.mixer.music.play()

            updatelabel()

        def stopsong(event):
            pygame.mixer.music.stop()
            v.set("")
            # return songname

        # listbox = Listbox(third)
        # listbox.pack()

        nextbutton = Button(third, text='Play Music', bg="Gray", fg="White")
        nextbutton.pack()
        label = Label(third)
        label.pack()
        stopbutton = Button(third, text='Stop Music', bg="Gray", fg="White")
        stopbutton.pack()

        nextbutton.bind("<Button-1>", nextsong)

        stopbutton.bind("<Button-1>", stopsong)

        songlabel.pack()

    def combine():
        gohead()
        second.destroy()

    second = Toplevel()
    second.geometry("600x470+120+120")
    Label(second).pack()  # empty label as a spacer
    frame1 = Frame(second)

    frame1.pack()

    scrollBar = Scrollbar(frame1)
    scrollBar.pack(side=RIGHT, fill=Y)
    listbox = Listbox(frame1,
                      width=50,
                      height=20,
                      yscrollcommand=scrollBar.set)
    listbox.pack(side=LEFT, fill="both")

    ########################################################initializing tensorflow variables start

    ###############################################################################file paths start
    RETRAINED_LABELS_TXT_FILE_LOC = "C:/Users/Nimzan/PycharmProjects/FinalApplication/TensorFlow/retrained_labels.txt"
    RETRAINED_GRAPH_PB_FILE_LOC = "C:/Users/Nimzan/PycharmProjects/FinalApplication/TensorFlow/retrained_graph.pb"
    ###############################################################################file paths end

    # TensorFlow 1.x-style graph loading
    with tf.gfile.FastGFile(RETRAINED_GRAPH_PB_FILE_LOC, 'rb') as retrainedGraphFile:
        graphDef = tf.GraphDef()  # creating a GraphDef object
        graphDef.ParseFromString(retrainedGraphFile.read())  # parsing the serialized graph
    tf.import_graph_def(graphDef, name='')  # importing into the current default graph

    sess = tf.Session()
    finalTensor = sess.graph.get_tensor_by_name('final_result:0')
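    # 'final_result:0' is the name TensorFlow's retrain.py gives the final
    # softmax op by default; adjust if the graph was exported under another name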

    ########################################################initializing tensorflow variables end

    mouth_cascade = cv2.CascadeClassifier(
        'TensorflowMouth/haarcascade_mcs_mouth.xml')

    cascPath = 'haarcascade_filters/haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(cascPath)

    video_capture = cv2.VideoCapture(0)

    import serial
    from sklearn.externals import joblib  # removed in newer scikit-learn; use plain 'import joblib' there
    # the two imports below are unused in this listing and look left over from
    # training the pulse model offline
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LogisticRegression

    arduino = serial.Serial('COM3', 115200, timeout=10)

    filename = "C:/Users/Nimzan/PycharmProjects/FinalApplication/Pulse/model.sav"
    loaded_model = joblib.load(filename)
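    # model.sav is assumed to hold a classifier (perhaps the LogisticRegression
    # imported above) trained offline to map one BPM reading to an emotion
    # label; it is queried below via loaded_model.predict([[bpm]])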

    finalEmotion = None
    Pulseemotions = [None]  # nothing classified yet; guarded before the comparison below

    while True:

        #################################################################################################################### pulse start
        data = arduino.readline()[:-2]  # stripping the trailing newline chars
        if data:
            # print(data)# create data
            bpm = int(data)
            if bpm < 39 or bpm > 200:
                print("BPM Exceeds the limit")
                continue
            else:
                # print(bpm)
                Pulseemotions = loaded_model.predict([[bpm]])
                # print(emotion)

                print("BPM : " + str(bpm) + "    Expected Emotions : " +
                      Pulseemotions[0])
                listbox.insert(
                    END, "BPM : " + str(bpm) + "    Expected Emotions : " +
                    Pulseemotions[0])

        #################################################################################################################### pulse end

        ret, frame = video_capture.read()  # capturing a frame from the video stream
        gray = cv2.cvtColor(
            frame, cv2.COLOR_BGR2GRAY)  # converting the frame to grayscale

        #####################################mouth start

        mouth_rects = mouth_cascade.detectMultiScale(gray, 1.7, 11)
        for (x, y, w, h) in mouth_rects:
            y = int(y - 0.15 * h)  # shifting the box up by 15% of its height
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            break  # only the first detected mouth is drawn

        #####################################mouth end

        #####################################detecting faces start
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
        )
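        # scaleFactor is the image-pyramid step, minNeighbors the number of
        # overlapping detections needed to keep a candidate, and minSize the
        # smallest face the cascade will report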
        #####################################detecting faces end

        #####################################selecting primary target face start
        if len(faces) > 0:
            print("Primary Face Found")
            listbox.insert(END, "Primary face found")
            primaryFaceCoordinate = faces[0]
            print(primaryFaceCoordinate)
            listbox.insert(END, primaryFaceCoordinate)

            [x, y, w, h] = primaryFaceCoordinate

            face = gray[y:y + h, x:x + w]  # cropping the face region
            face = cv2.resize(face, (350, 350))  # resizing the crop to 350x350

            #####################################emotion start
            fisherEmotion = fish.findEmotion(face)  # fisherface

            cv2.imwrite("tensorFace.jpg", face)
            image = cv2.imread("tensorFace.jpg")  # reading back the face written just above
            tensorEmotions, tensorValues = tensor.getEmotions(
                image, sess, finalTensor)
            tensorEmotion = tensorEmotions[0]
            #####################################emotion end

            #####################################comparison start
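            # fusion rule: trust Fisherface outright for "happy" and "surprise",
            # or whenever it matches one of TensorFlow's top three predictions;
            # otherwise fall back to TensorFlow's top prediction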
            if fisherEmotion == "happy":
                finalEmotion = fisherEmotion
            elif fisherEmotion == "surprise":
                finalEmotion = fisherEmotion
            elif fisherEmotion == tensorEmotions[0]:
                finalEmotion = fisherEmotion
            elif fisherEmotion == tensorEmotions[1]:
                finalEmotion = fisherEmotion
            elif fisherEmotion == tensorEmotions[2]:
                finalEmotion = fisherEmotion
            else:
                finalEmotion = tensorEmotions[0]

            print(finalEmotion)
            listbox.insert(END, finalEmotion)

            #####################################comparison end

        else:
            print("No face Found")
        #####################################selecting primary target face end

        if finalEmotion is None or Pulseemotions[0] is None:
            continue  # waiting until both a face emotion and a pulse emotion exist

        if finalEmotion != Pulseemotions[0]:
            print("Emotion is not relevant to pulse")
            print(" ")
            listbox.insert(END, "Emotion is not relevant to pulse")
            listbox.insert(END, " ")

        else:
            print("Emotion seems legit")
            listbox.insert(END, "Emotion seems legit")
            break

        #####################################writing on frame start
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10, 25)
        fontScale = 1
        fontColor = (0, 255, 0)
        lineType = 2

        cv2.putText(frame, finalEmotion, bottomLeftCornerOfText, font,
                    fontScale, fontColor, lineType)
        '''
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (500, 25)
        fontScale = 1
        fontColor = (0, 255, 0)
        lineType = 2

        cv2.putText(frame, fisherEmotion,
                    bottomLeftCornerOfText,
                    font,
                    fontScale,
                    fontColor,
                    lineType)
        '''
        #####################################writing on frame end

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                      2)  # drawing a rectangle around the face
        cv2.imshow('Video', frame)  # Display updated frame

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    scrollBar.config(command=listbox.yview)
    Label(second).pack()  # spacer
    label1 = Label(second, text="YOUR EMOTION IS : " + finalEmotion, font=20)
    label1.pack()
    Label(second).pack()  # spacer
    nextbutton = Button(second,
                        width=20,
                        text='PLAY MUSIC',
                        command=combine,
                        bg="Gray",
                        fg="White")
    nextbutton.pack()

    # releasing the capture once everything is done
    video_capture.release()
    cv2.destroyAllWindows()
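
Both listings lean on two helper modules, fish and tensor, that this page does not show. The sketch below is a guess at their shape, assuming fish wraps OpenCV's Fisherface recognizer (from opencv-contrib) and tensor.getEmotions runs the image through the retrained graph's softmax; the label order, the model file names, and the 'DecodeJpeg:0' input node are all assumptions, not code from the original project.

# hypothetical fish.py (assumed, not shown in the original)
import cv2

EMOTIONS = ["anger", "happy", "sadness", "surprise"]  # assumed label order

fishface = cv2.face.FisherFaceRecognizer_create()
fishface.read("fisherface_model.xml")  # assumed pre-trained model file


def findEmotion(face):
    # face: the 350x350 grayscale crop prepared by the caller
    label, confidence = fishface.predict(face)
    return EMOTIONS[label]


# hypothetical tensor.py (assumed, not shown in the original)
import numpy as np


def getEmotions(image, sess, finalTensor):
    # feeding the decoded BGR image to the graph's 'DecodeJpeg:0' input, the
    # usual entry point for graphs produced by TensorFlow's retrain.py
    predictions = sess.run(finalTensor, {'DecodeJpeg:0': image})
    scores = np.squeeze(predictions)
    order = scores.argsort()[::-1]  # class indices sorted best-first
    labels = open("retrained_labels.txt").read().splitlines()  # assumed path
    emotions = [labels[i] for i in order]
    values = [float(scores[i]) for i in order]
    return emotions, values

If the modules really look like this, tensorEmotions[0] in the main loop is the top-scoring label and tensorValues[0] its softmax score, which matches how the comparison block uses them.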