Example no. 1
def is_smiling(img):
    """Given an image (bytes), return True if a mouth is discovered and it is smiling."""
    mouth = m.findmouth(img)
    # show(mouth)
    if mouth != 2:  # did not return error
        mouthimg = crop(mouth)
        cv.SaveImage("webcam-m.jpg", mouthimg)
        # predict the captured emotion
        result = lr.predict(vectorize("webcam-m.jpg"))
        return result == 1
    else:
        # "failed to detect mouth. Try hold your head straight and make sure there is only one face."
        return False
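A minimal way to call this variant, assuming the module-level m (mouth detection), lr (trained logistic-regression model), crop, and vectorize helpers from these snippets are already set up and the image is loaded with the legacy cv API; the file name is just an example:

import cv  # legacy OpenCV 1.x bindings (or: import cv2.cv as cv)

img = cv.LoadImage("webcam.jpg")  # hypothetical capture saved earlier
if is_smiling(img):
    print "smiling"
else:
    print "not smiling (or no mouth found)"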
Example no. 2
def is_smiling(lr, image):
    mouth_image_name = 'temp-mouth.jpg'
    mouth = m.findmouth(image)
    # show(mouth)
    if mouth != 2: # did not return error
        mouthimg = crop(mouth)
        cv.SaveImage(mouth_image_name, mouthimg)
        # predict the captured emotion
        result = lr.predict(vectorize(mouth_image_name))
        if result == 1:
            return 1
        else:
            return 0
    else:
        return -1
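This variant takes the trained model as a parameter and reports 1 (smiling), 0 (not smiling) or -1 (no mouth found). A short caller sketch under the same assumptions about lr and a cv-loaded image:

status = is_smiling(lr, cv.LoadImage("webcam.jpg"))  # hypothetical capture file
if status == -1:
    print "no mouth detected"
elif status == 1:
    print "smiling"
else:
    print "not smiling"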
Example no. 3
def is_smiling(img=None, filename=None):
    """Given an image (bytes), return True if a mouth is discovered and it is smiling."""
    if img is None:
        if filename is None:
            raise Exception("You need to specify an img or a filename")
        img = open_as_cv(filename)
    mouth = m.findmouth(img)
    print "is_smiling: mouth {}".format(mouth)
    if mouth != 2: # did not return error
        mouthimg = crop(img, mouth)
        cv.SaveImage("webcam-m.jpg", mouthimg)
        # predict the captured emotion
        result = SMILE_LR.predict(vectorize('webcam-m.jpg'))
        return (result == 1)
    else:
        #print "failed to detect mouth. Try hold your head straight and make sure there is only one face."
        return False
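This variant accepts either an already-loaded image or a file name; open_as_cv and the module-level SMILE_LR model are names assumed from the snippet itself. For example:

print is_smiling(filename="webcam.jpg")           # let the function load the file
print is_smiling(img=open_as_cv("webcam.jpg"))    # or pass a loaded image directly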
Example no. 4
    def do_POST(self):
        logging.warning("======= POST STARTED =======")
        logging.warning(self.headers)
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type'],
                     })
        logging.warning("======= POST VALUES =======")
        # decode the base64-encoded image sent in the "image" form field
        image_base64 = form.getvalue("image")
        g = open("out.jpg", "wb")
        g.write(image_base64.decode('base64'))
        g.close()
        img = cv.LoadImage("out.jpg")  # input image
        mouth = m.findmouth(img)
        # show(mouth)
        if mouth != 2:  # did not return error
            mouthimg = crop(mouth, img)
            cv.SaveImage("webcam-m.jpg", mouthimg)
            # predict the captured emotion
            result = lr.predict(vectorize('webcam-m.jpg'))
            if result == 1:
                print "you are smiling! :-) "
                ans = "S"
            else:
                print "you are not smiling :-| "
                ans = "F"
        else:
            ans = "D"
            print "failed to detect mouth. Try to hold your head straight and make sure there is only one face."
        logging.warning("\n")
        self.send_response(200, "OK")
        self.send_header("Content-type", "application/json")
        self.end_headers()
        print ans
        # return valid JSON, e.g. {"result": "S"}
        self.wfile.write('{"result": "' + ans + '"}')
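For reference, a client for this handler would POST the image as a base64-encoded form field named image and read back the {"result": ...} body. A rough Python 2 sketch; the host, port, and input file name are assumptions, since the listing does not show how the server is bound:

import urllib
import urllib2

with open("face.jpg", "rb") as f:                      # hypothetical input image
    payload = urllib.urlencode({"image": f.read().encode("base64")})
response = urllib2.urlopen("http://localhost:8080", payload)  # assumed host/port
print response.read()                                  # e.g. {"result": "S"}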
Example no. 5
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = face.detectMultiScale(gray,
                                      scaleFactor=1.1,
                                      minNeighbors=5,
                                      minSize=(30, 30),
                                      flags=cv2.cv.CV_HAAR_SCALE_IMAGE)

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if True:
            cv.SaveImage("webcam.jpg", cv.fromarray(frame))
            img = cv.LoadImage("webcam.jpg")  # input image
            mouth = m.findmouth(img)
            smile = 0
            nosmile = 0
            #show(mouth)
            if mouth != 2:  # did not return error
                mouthimg = crop(mouth)
                cv.SaveImage("webcam-m.jpg", mouthimg)
                # predict the captured emotion
                result = lr.predict(vectorize('webcam-m.jpg'))
                if result == 1:
                    print "You are smiling! :-) "
                    smile = smile + 1
                else:
                    print "You are not smiling :-/ "
                    nosmile = nosmile + 1
            else:
                print "Failed to detect mouth. Try to hold your head straight and make sure there is only one face."
   cv.SaveImage("webcam.jpg", cv.fromarray(frame))
   print x,y,w,h
  
   img = cv.LoadImage("webcam.jpg")
   
   cropping=img[y-30: y + h+50, x-30: x + w+50]
   if(i==1):
       cv.SaveImage("crop1.jpg", cropping)
   else:
       cv.SaveImage("crop2.jpg", cropping)
 
   if(i==1):
       img = cv.LoadImage("crop1.jpg")
   else:
       img= cv.LoadImage("crop2.jpg")
   mouth = m.findmouth(img)
  
   if mouth != 2: 
       mouthimg = crop(mouth)
       if(i==1):
           cv.SaveImage("webcam-m1.jpg", mouthimg)
       else:
           cv.SaveImage("webcam-m2.jpg", mouthimg)
     
       if(i==1):
           result = lr.predict(vectorize('webcam-m1.jpg'))
       else:
           result = lr.predict(vectorize('webcam-m2.jpg'))
       if result == 1:
           print "face",i, ": You are smiling! :-) "
           smile=smile+1
Example no. 7
        phi[idx] = vectorize(PATH + filename)
        labels.append(2)'''

    """
    training the data with logistic regression
    """
    lr = logistic.Logistic(dim)
    lr.train(phi, labels)
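The tail of the commented-out block above and the lr.train(phi, labels) call suggest that phi holds one vectorize()d training image per row and labels holds the class of each row. A hedged sketch of how that input might be assembled; dim, PATH, and the file lists are placeholders, the 1/0 labels follow the smiling-vs-not convention used by lr.predict in these examples, and the exact format expected by logistic.Logistic is an assumption:

import numpy as np

dim = 120 * 120                            # assumed length of the vector returned by vectorize()
filenames = smile_files + neutral_files    # hypothetical lists of training image names
phi = np.zeros((len(filenames), dim))
labels = []
for idx, filename in enumerate(filenames):
    phi[idx] = vectorize(PATH + filename)               # one feature row per image
    labels.append(1 if filename in smile_files else 0)  # 1 = smiling, 0 = not smiling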
    

    """
    open webcam and capture images
    """
   
    img = cv2.imread("/home/qbuser/Desktop/4.jpeg") # input image
    mouth = mouthdetection.findmouth(img)
            # show(mouth)
    #if mouth != 2: # did not return error
                #mouthimg = crop(mouth)
    cv2.imwrite("webcam-m.jpg", mouth)
                # predict the captured emotion
    result = lr.predict(vectorize('webcam-m.jpg'))
    if result == 1:
                    print "you are smilinggggggggg! :-) "
    elif result == 0:
                    print "you are neutral "
    #elif result==2:
		    print "you are sad"
    #else:
     #           print "failed to detect mouth. Try hold your head straight and make sure there is only one face."
    
    if vc.isOpened():  # try to grab the first frame from the capture device
        rval, frame = vc.read()  # frame : np array
    else:
        rval = False

    print "\n\n\n\n\npress q to exit"


    while rval:
        cv2.imshow("preview", frame)
        rval, frame = vc.read()

        if cv2.waitKey(10) & 0xFF == ord('q'):  # exit on 'q'
            break
        else:
            t0 = time.time()
            face, mouth = m.findmouth(frame, haarFace, haarMouth)
            t1 = time.time()
            print t1 - t0


            try:
                (x, y, w, h) = face
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                (x, y, w, h) = mouth
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            except ValueError:
                continue

            # shows(mouth)
            if len(mouth) != 0: # did not return error
Example no. 9
# ksize  Size of the filter returned.
# sigma  Standard deviation of the gaussian envelope.
# theta  Orientation of the normal to the parallel stripes of a Gabor function.
# lambd  Wavelength of the sinusoidal factor.
# gamma  Spatial aspect ratio.
# psi  Phase offset.
# ktype  Type of filter coefficients. It can be CV_32F or CV_64F .

thets = np.arange(0, np.pi, np.pi/8)


for thet in thets:
    kern = cv2.getGaborKernel(ksize=(9, 9), sigma=3, theta=thet, lambd=3.13,
                              gamma=3, psi=0, ktype=cv2.CV_32F)
    cap = cv2.VideoCapture(video_jordan)
    while cap.isOpened():
        ret, frame = cap.read()

        if frame is not None:
            detected_object = mouthdetection.findmouth(frame, haar_face, haar_mouth)[0]
            frame = smile_detection.crop(detected_object, frame)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.filter2D(gray, cv2.CV_8UC3, kern)
            edges = cv2.Canny(gray,100,100)
            cv2.imshow('frame', edges)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            break
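To see what the parameters documented at the top of this example actually control, it can help to render the Gabor kernels themselves rather than the filtered video. A small sketch using only OpenCV and NumPy; the window names and the upscaling size are arbitrary choices:

import cv2
import numpy as np

for thet in np.arange(0, np.pi, np.pi / 8):
    kern = cv2.getGaborKernel(ksize=(9, 9), sigma=3, theta=thet, lambd=3.13,
                              gamma=3, psi=0, ktype=cv2.CV_32F)
    # blow the 9x9 kernel up so it is visible, and rescale to [0, 1] for imshow
    vis = cv2.resize(kern, (180, 180), interpolation=cv2.INTER_NEAREST)
    vis = (vis - vis.min()) / (vis.max() - vis.min())
    cv2.imshow("gabor kernel, theta=%.2f" % thet, vis)
cv2.waitKey(0)
cv2.destroyAllWindows()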