Exemplo n.º 1
0
def initcam(n):
  """Run the webcam emotion loop for at most *n* seconds.

  Predicts an emotion for each captured frame, overlays per-class
  probability bars and the matching emoji, and tallies hits in the
  module-level ``performance`` list.  Press 'q' to quit early.
  """
  network = EMR()
  network.build_network()
  video_capture = cv2.VideoCapture(0)
  font = cv2.FONT_HERSHEY_SIMPLEX
  # Pre-load one emoji per emotion; -1 keeps the alpha channel.
  feelings_faces = []
  for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
  print("Starting Cam with Timer")
  counter = 0
  running = True
  startTime = datetime.now()
  while running:
    timeElapsed = (datetime.now() - startTime).total_seconds()
    if timeElapsed > n:
      running = False
    print("counter:" + str(timeElapsed))
    ret, frame = video_capture.read()
    result = network.predict(format_image(frame))
    if result is not None:
      # Label each class and draw a bar proportional to its probability.
      for index, emotion in enumerate(EMOTIONS):
        cv2.putText(frame, emotion, (50, index * 20 + 20), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
        cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)
      # Most-likely class index (computed once; original computed it twice).
      foundpos = result[0].tolist().index(max(result[0]))
      face_image = feelings_faces[foundpos]
      print("value found=" + str(foundpos))
      performance[foundpos] += 1  # NOTE(review): assumes module-level `performance` list — confirm
      counter += 1
      # Alpha-blend the emoji into a fixed 120x120 region of the frame.
      for c in range(0, 3):
        frame[200:320, 10:130, c] = face_image[:, :, c] * (face_image[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break
  video_capture.release()
  cv2.destroyAllWindows()
Exemplo n.º 2
0
class SmileDetector:
    """Smile classifier: EMR feature network followed by an SVM final layer."""

    def __init__(self):
        base_dir = os.path.dirname(os.path.abspath(__file__))
        net_path = os.path.abspath(
            os.path.join(base_dir, "../Models/sentiment_net/sentiment_net"))
        self.network = EMR(net_path)
        self.network.build_network()
        svm_path = os.path.abspath(
            os.path.join(base_dir, "../Models/svm_model.pkl"))
        self.final_layer = joblib.load(svm_path)

    def preprocess_image(self, image):
        """Store a normalised 48x48 grayscale version of *image* on self."""
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (48, 48), interpolation=cv2.INTER_CUBIC)
        self.image = resized / 255.

    def predict(self):
        """Classify the preprocessed image; True when the SVM output is falsy."""
        features = self.network.predict(self.image)
        label = self.final_layer.predict(features)
        return False if label else True
Exemplo n.º 3
0
@app.route('/api/jpeg', methods=['POST'])
def login():
    """Decode a POSTed JPEG, run emotion prediction, and return JSON.

    Returns ``{'contains': bool, 'emotions': dict | None}`` where the dict
    maps the seven emotion names to the network's per-class scores.
    """
    if request.method == 'POST':
        # np.fromstring is deprecated for binary input; frombuffer is the
        # supported (zero-copy) replacement with identical results here.
        nparr = np.frombuffer(request.data, np.uint8)
        frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        result = network.predict(format_image(frame))
        if result is not None:
            resp = {
                'contains': True,
                'emotions': {
                    'angry': result.item(0),
                    'disgusted': result.item(1),
                    'fearful': result.item(2),
                    'happy': result.item(3),
                    'sad': result.item(4),
                    'surprised': result.item(5),
                    'neutral': result.item(6)
                }
            }
        else:
            resp = {'contains': False, 'emotions': None}
    return jsonify(resp)


if __name__ == '__main__':
    # Build the model graph once, then serve the API on all interfaces.
    network.build_network()
    app.run(host='0.0.0.0')
Exemplo n.º 4
0
def format_image(image):
    """Format a frame for the network: grayscale, largest-face crop, 48x48, [0,1].

    Returns None when no face is detected or the crop cannot be resized.
    """
    if len(image.shape) > 2 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        # cv2.CV_LOAD_IMAGE_GRAYSCALE was removed in OpenCV 3;
        # cv2.IMREAD_GRAYSCALE is the current name for the same flag.
        image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    # NOTE(review): the pasted source fused two lines here and lost the
    # minNeighbors value; 5 matches the detector call elsewhere in this file.
    faces = cascade_classifier.detectMultiScale(
        image, scaleFactor=1.3, minNeighbors=5)
    if not len(faces) > 0:
        return None
    # Keep only the face with the largest bounding-box area.
    max_area_face = faces[0]
    for face in faces:
        if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
            max_area_face = face
    x, y, w, h = max_area_face
    # Crop rows by height and columns by width; the original swapped w and h,
    # which only worked because Haar face boxes are square.
    image = image[y:y + h, x:x + w]
    try:
        image = cv2.resize(image, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("----->Problem during resize")
        return None
    return image
# Demo script: live webcam emotion recognition with per-class probability
# bars, an emoji overlay, and a box around the largest detected face.
network = EMR()
network.build_network()
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
# Pre-load one emoji per emotion; -1 keeps the alpha channel.
feelings_faces = []
for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
# Load the Haar cascade once — it is loop-invariant; the original
# reconstructed it on every frame.
facecasc = cv2.CascadeClassifier(
    'haarcascade_files/haarcascade_frontalface_default.xml')
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, 1.3, 5)
    result = network.predict(format_image(frame))
    if result is not None:
        # Write each emotion with a bar indicating its probability.
        for index, emotion in enumerate(EMOTIONS):
            cv2.putText(frame, emotion, (10, index * 20 + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(frame, (130, index * 20 + 10),
                          (130 + int(result[0][index] * 100),
                           (index + 1) * 20 + 4),
                          (255, 0, 0), -1)
        # Find the emotion with maximum probability and display it.
        maxindex = np.argmax(result[0])
        cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font,
                    2, (255, 255, 255), 2, cv2.LINE_AA)
        face_image = feelings_faces[maxindex]
        print(face_image[:, :, 3])
        # Alpha-blend the emoji into a fixed 120x120 region of the frame.
        for c in range(0, 3):
            frame[200:320, 10:130, c] = (
                face_image[:, :, c] * (face_image[:, :, 3] / 255.0)
                + frame[200:320, 10:130, c]
                * (1.0 - face_image[:, :, 3] / 255.0))
    if len(faces) > 0:
        # Draw a box around the face with the largest area.
        max_area_face = faces[0]
        for face in faces:
            if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
                max_area_face = face
        (x, y, w, h) = max_area_face
        frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10),
                              (255, 0, 0), 2)
    cv2.imshow('Video', cv2.resize(frame, None, fx=2, fy=2,
                                   interpolation=cv2.INTER_CUBIC))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        # The pasted source fused "break" and "cap.release()" into one token.
        break
cap.release()
cv2.destroyAllWindows()