def detection(image_path):
    # parameters for loading data and images
    detection_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\haarcascade_frontalface_default.xml'
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameter for bounding box shape (an earlier (30, 60) value was overridden)
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    count1 = 0
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        color = (0, 0, 255)
        draw_bounding_box(face_coordinates, rgb_image, color)
        count1 += 1

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    # cv2.imwrite('C:\\Users\\l1f15bscs0049\\Desktop\\test_cases\\testt2.png', bgr_image)
    return count1
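# A minimal usage sketch for detection(); the image path below is a hypothetical
# example and assumes the cascade file hard-coded above exists on disk.
if __name__ == '__main__':
    face_count = detection('C:\\Users\\l1f15bscs0049\\Desktop\\group_photo.jpg')
    print('faces detected:', face_count)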
def get_emotion(self, image_path_, face_detection, emotion_classifier, gender_classifier):
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # defaults returned when no face is detected (note: the emotion default is "happy")
    emotion_text = "happy"
    gender_text = "Unknown"

    # loading images
    rgb_image = load_image(image_path_, grayscale=False)
    gray_image = load_image(image_path_, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

    return emotion_text, gender_text
def detect(self, image_path):
    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    res = []
    faces = detect_faces(self.face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, self.gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, self.gender_target_size)
            gray_face = cv2.resize(gray_face, self.emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = self.gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = self.gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(self.emotion_classifier.predict(gray_face))
        emotion_text = self.emotion_labels[emotion_label_arg]

        if gender_text == self.gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        res.append({
            'left': x1,
            'top': y1,
            'right': x2,
            'bottom': y2,
            'male': gender_text,
            'emotion': emotion_text
        })
        # print(gender_text, emotion_text)
        # draw = ImageDraw.Draw(img)
        # draw.rectangle(((x1, y1), (x2, y2)), outline='red')
        # draw.text((0, 0), "something123")
    return res
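# Consuming detect()'s return value; a self-contained sketch with a hand-made
# sample record whose dict layout matches what detect() appends above:
sample = [{'left': 10, 'top': 20, 'right': 110, 'bottom': 120,
           'male': 'man', 'emotion': 'happy'}]
for face in sample:
    print('%s/%s at (%d, %d)-(%d, %d)' % (
        face['male'], face['emotion'],
        face['left'], face['top'], face['right'], face['bottom']))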
def recognition(f_2_s):
    # parameters for loading data and images
    # image_path = path_r
    image_path = image_handler(f_2_s)
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameter for bounding box shape (an earlier (20, 40) value was overridden)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        color = (255, 0, 0)  # draw the emotion label and bounding box in red
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

    # if emotion_text == "":
    #     recognition(f_2_s)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('../images/predicted_test_image.png', bgr_image)
    check_recoged_img('../images/predicted_test_image.png')
def run_classify(image_path):
    # relies on module-level face_detection, classifiers, labels, offsets;
    # "dir" is a module-level directory path that shadows the builtin
    emotion_text = ''
    gender_text = ''

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(os.path.join(dir, '../images/predicted_test_image.png'), bgr_image)
    return [gender_text, emotion_text]
def make_label(image_path):
    # loading models (paths, labels and offsets come from module-level globals)
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    emotion_text = None  # returned unchanged if no face is detected
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)
    return emotion_text
def getFaceEmotion(image_path):
    # parameters for loading data and images
    base = 'C://Users/lenovo/Desktop/moodify/components/emotion-classification'
    detection_model_path = base + '/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = base + '/trained_models/emotion_models/fer2013_denseNet.59-0.68.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    emotion_text = None  # returned unchanged if no face is detected
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        color = (255, 0, 0)
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -10, 0.5, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('predicted_test_image.png', bgr_image)
    return emotion_text
def handleRequest():
    # ported from Python 2: print() calls, io.BytesIO replaces cStringIO,
    # and urllib.request.urlopen replaces urllib.urlopen
    import io
    import urllib.request

    image_url = ""
    try:
        image_url = request.forms.get('image_url')
        print("THE IMAGE: ", image_url)
    except Exception:
        print("[ERR] Dead Image")
        return "-1"

    image_path = io.BytesIO(urllib.request.urlopen(image_url).read())

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        print("Inside")
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        print("Predicted Gender: ", gender_text)

        if gender_text == gender_labels[0]:
            print("[LOG] Man")
        else:
            print("[LOG] Woman")
        return "1"
    return "0"
def predict(image_path):
    # loading images
    start = time.time()
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    with graph.as_default():
        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -10, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -40, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    resized_image = cv2.resize(bgr_image, (1024, 769))
    file_path = 'result/' + image_path.split('/')[-1]
    cv2.imwrite(file_path, resized_image)
    end = time.time()
    logging.info('[Time] for prediction(): {}'.format(end - start))
    return file_path
def get_gender(self, image_path_, face_detection, emotion_classifier, gender_classifier):
    gender_labels = get_labels('imdb')

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # getting input model shapes for inference
    gender_target_size = gender_classifier.input_shape[1:3]
    emotion_target_size = emotion_classifier.input_shape[1:3]  # was missing; needed for the resize below

    # loading images
    rgb_image = load_image(image_path_, grayscale=False)
    gray_image = load_image(image_path_, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    gender_text = "Unknown"  # returned unchanged if no face is detected
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        rgb_face = rgb_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
    return gender_text
def on_data(data):
    with open('current.jpg', 'wb') as f:
        f.write(base64.decodebytes(data))
    image_path = "current.jpg"
    out = []

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    emotion_text = None  # stays None if no face is detected
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        color = (0, 0, 255)
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('predicted.png', bgr_image)
    with open('predicted.png', 'rb') as f:
        encoded = base64.encodebytes(f.read()).decode('utf-8')
    out.append({
        'image': encoded,
        'emotion': emotion_text,
    })
    return out
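# Sketch of driving on_data() with a base64 payload, as a socket handler might
# receive it; the input file name here is a hypothetical example:
with open('input.jpg', 'rb') as fh:
    payload = base64.encodebytes(fh.read())
result = on_data(payload)
print(result[0]['emotion'])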
def detect(net, meta, image, thresh=.2, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res
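# Usage sketch for the wrapper above, assuming the standard darknet.py bindings
# (load_net/load_meta) and the stock YOLO config/weight/data files on disk:
net = load_net(b'cfg/yolov3.cfg', b'yolov3.weights', 0)
meta = load_meta(b'cfg/coco.data')
for name, prob, (x, y, w, h) in detect(net, meta, b'data/dog.jpg'):
    print(name, round(prob, 3), (x, y, w, h))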
# hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
gender_offsets = (10, 10)
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image)
gray_image = gray_image.astype('uint8')

faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]
    try:
        rgb_face = cv2.resize(rgb_face, gender_target_size)
        gray_face = cv2.resize(gray_face, emotion_target_size)
    except cv2.error:  # the excerpt ends mid-try; this handler follows the pattern used throughout
        continue
def emotion_identify(img_url):
    # parameters for loading data and images
    detection_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    image_path = img_url
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print("No face")
        K.clear_session()
        return False

    emotions = collections.defaultdict(int)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotions[emotion_text] += 1

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    # pick the most frequent emotion across all detected faces
    max_num = 0
    max_emotion = None
    for key, value in emotions.items():
        if value > max_num:
            max_num = value
            max_emotion = key
    print("The emotion of this picture is: ", max_emotion)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./result_images/predicted_test_image.png', bgr_image)
    K.clear_session()
    return max_emotion
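# Minimal driver for emotion_identify(); the image path is a hypothetical example.
if __name__ == '__main__':
    dominant = emotion_identify('C:/Users/Admin/PycharmProjects/Emotion_Detection/images/test.png')
    if dominant:
        print('dominant emotion:', dominant)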
def recognize_old(self):
    # classifier settings: rf:[90, euqMean, 'emb'], grb:[112, euqMean, 'emb'],
    # svc:[80, np.mean, 'emb'], lsvc:[91, meanEuq, 'emb'], ada:[90, euqMean, 'emb'],
    # bag:[97, euqMean, 'emb'], xtr:[51, euqMean, 'emb']
    if self.rfVar.get():
        clf_dict[rf] = [90, euqMean, 'emb']
    if self.grbVar.get():
        clf_dict[grb] = [112, euqMean, 'emb']
    if self.adaVar.get():
        clf_dict[ada] = [90, euqMean, 'emb']
    if self.bagVar.get():
        clf_dict[bag] = [97, euqMean, 'emb']
    if self.xtrVar.get():
        clf_dict[xtr] = [51, euqMean, 'emb']
    if self.lsvcVar.get():
        clf_dict[lsvc] = [91, meanEuq, 'emb']
    if self.svcVar.get():
        clf_dict[svc] = [80, np.mean, 'emb']

    dir_to_save = './crops/'
    # new_photo = detect_faces(self.path, dir_to_save)
    # cv2.imwrite(dir_to_save + os.path.basename(self.path) + 'new.bmp', new_photo)
    faces = glob.glob(dir_to_save + '*' + os.path.basename(self.path) + '*')
    for image_path in faces:
        print('taking ' + image_path + ' into account')
        try:
            gray_image = load_image(image_path, grayscale=True)
        except Exception:
            continue
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')
        gray_face = gray_image
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            print('exception ignored')
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, axis=0)
        gray_face = np.expand_dims(gray_face, -1)
        # if 'fer' in layer_label:
        #     gray_face = np.expand_dims(gray_face, 0)
        # if 'fer' not in layer_label:
        #     gray_face = np.expand_dims(gray_face, -1)
        dict_faces[str(image_path)] = emotion_classifier.predict(gray_face)

    answers = {'positive': 0, 'neutral': 1, 'negative': 2}
    ans_arr = []
    for clf in clf_dict:
        with open('best_pca_' + clf.__class__.__name__ + '_n' + str(clf_dict[clf][0]), 'rb') as pf:
            pca = pickle.load(pf)
        vectors = [dict_faces[key] for key in dict_faces.keys()]
        vectors = np.reshape(vectors, (len(vectors), -1))
        print(clf.__class__.__name__)
        print(clf_dict[clf][1].__class__.__name__)
        feature_vector = clf_dict[clf][1](vectors, axis=0)
        ans_arr.append(answers[clf.predict(pca.transform(np.expand_dims(feature_vector, axis=0)))[0]])

    self.emotion_label.config(text=('Emotion: ' + self.decision_make(ans_arr)).upper(), font='Times 30')
    im = Image.open(dir_to_save + os.path.basename(self.path) + 'new.bmp')
    tkimage = ImageTk.PhotoImage(im)
    self.myvar.config(image=tkimage)
    self.myvar.image = tkimage
    self.myvar.pack()
def recognition(f_2_s):
    # Python has no goto, so flags are used to escape the nested conditionals
    tt = False        # flag for escaping the nested conditionals
    emotion_c = ""    # holds the confirmed emotion
    # recap == False

    # parameters for loading data and images
    # image_path = path_r
    image_path = image_handler(f_2_s)  # returns the saved photo's path once the user confirms saving

    # paths to the trained model and the emotion labels
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX  # font used when displaying the emotion text

    # hyper-parameter for bounding box shape (an earlier (20, 40) value was overridden)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            # pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            # recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        # once an emotion is recognized, ask the user to confirm it, then save or bail out
        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?', title=emotion_text,
                                buttons=['yes', 'no'])  # ask whether the recognized emotion is correct
        if tof == 'yes':
            tt = True  # escape the nested conditionals
            emotion_c = emotion_text
            color = (255, 0, 0)  # red for the emotion text and the rectangle
            # draw_bounding_box(face_coordinates, rgb_image, color)
            # draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)
        elif tof == 'no':
            tt = False
            break

    if tt:
        # the user confirmed the emotion: build a file name for it and save the photo
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        next_num = next_index(emotion_text)  # count existing files for this emotion; the next index to use
        cv2.imwrite('../src/' + emotion_text + 'z' + str(next_num) + '.jpg', bgr_image)  # save the newly recognized photo
        f = open(emotion_text + 'z' + str(next_num) + ".txt", 'w', encoding="UTF8")  # create the matching diary file
        f.close()

        # then show the detected face region and emotion label to the user
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        draw_bounding_box(face_coordinates, img, color)
        draw_text(face_coordinates, img, emotion_text, color, 0, -30, 1.5, 2)
        while True:  # freeze the window until a key is pressed
            cv2.imshow(image_path, img)
            if cv2.waitKey(1) > 0:
                break
        # check_recoged_img('../src/' + emotion_text + 'z' + str(next_num) + '.jpg')  # would save the annotated photo
    else:
        # alert that no emotion was captured (recognition error, or mismatch with what the user expected)
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
def main_thread(self):
    if self.data_bridge.processing_chosen_by_radio_butten == 'img':
        flag = 0
        detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')

        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        while self.data_bridge.start_process_manager and flag == 0:
            flag = 1
            image_path = self.data_bridge.selected_video_file_path
            font = cv2.FONT_HERSHEY_SIMPLEX

            # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
            gender_offsets = (10, 10)
            emotion_offsets = (0, 0)

            rgb_image = load_image(image_path, grayscale=False)
            gray_image = load_image(image_path, grayscale=True)
            gray_image = np.squeeze(gray_image)
            gray_image = gray_image.astype('uint8')

            faces = detect_faces(face_detection, gray_image)
            for face_coordinates in faces:
                x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                rgb_face = rgb_image[y1:y2, x1:x2]
                x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    rgb_face = cv2.resize(rgb_face, gender_target_size)
                    gray_face = cv2.resize(gray_face, emotion_target_size)
                except cv2.error:
                    continue

                rgb_face = preprocess_input(rgb_face, False)
                rgb_face = np.expand_dims(rgb_face, 0)
                gender_prediction = gender_classifier.predict(rgb_face)
                gender_label_arg = np.argmax(gender_prediction)
                gender_text = gender_labels[gender_label_arg]

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
                emotion_text = emotion_labels[emotion_label_arg]

                if gender_text == gender_labels[0]:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)

                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
                draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            cv2.imwrite('../images/predicted_test_image.png', bgr_image)
            print("File has been stored in Images folder")
            print("Press stop processing to exit")
            self.gui_root.update()

    if (self.data_bridge.processing_chosen_by_radio_butten == 'vid') or \
            (self.data_bridge.processing_chosen_by_radio_butten == 'web'):
        detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')

        # Models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        while self.data_bridge.start_process_manager:
            font = cv2.FONT_HERSHEY_SIMPLEX
            frame_window = 10
            gender_offsets = (30, 60)
            emotion_offsets = (20, 40)
            gender_window = []
            emotion_window = []

            # starting video streaming
            cv2.namedWindow('Window_frame')
            if self.data_bridge.processing_chosen_by_radio_butten == 'vid':
                self.cap = cv2.VideoCapture(self.data_bridge.selected_video_file_path)
            else:
                self.cap = cv2.VideoCapture(0)
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter('Save.avi', fourcc, 20.0, (720, 480))

            while self.data_bridge.start_process_manager:
                ret, bgr_image = self.cap.read()
                gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
                rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
                faces = detect_faces(face_detection, gray_image)

                for face_coordinates in faces:
                    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                    rgb_face = rgb_image[y1:y2, x1:x2]
                    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                    gray_face = gray_image[y1:y2, x1:x2]
                    try:
                        rgb_face = cv2.resize(rgb_face, gender_target_size)
                        gray_face = cv2.resize(gray_face, emotion_target_size)
                    except cv2.error:
                        continue

                    gray_face = preprocess_input(gray_face, False)
                    gray_face = np.expand_dims(gray_face, 0)
                    gray_face = np.expand_dims(gray_face, -1)
                    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
                    emotion_text = emotion_labels[emotion_label_arg]
                    emotion_window.append(emotion_text)

                    rgb_face = np.expand_dims(rgb_face, 0)
                    rgb_face = preprocess_input(rgb_face, False)
                    gender_prediction = gender_classifier.predict(rgb_face)
                    gender_label_arg = np.argmax(gender_prediction)
                    gender_text = gender_labels[gender_label_arg]
                    gender_window.append(gender_text)

                    if len(gender_window) > frame_window:
                        emotion_window.pop(0)
                        gender_window.pop(0)
                    try:
                        emotion_mode = mode(emotion_window)
                        gender_mode = mode(gender_window)
                    except Exception:
                        continue

                    color = (0, 0, 0)  # the original branched on gender but used black either way
                    draw_bounding_box(face_coordinates, rgb_image, color)
                    draw_text(face_coordinates, rgb_image, gender_mode, color, 0, -20, 1, 1)
                    draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45, 1, 1)

                bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
                cv2.imshow('Window_frame', bgr_image)
                self.gui_root.update()

            self.cap.release()
            cv2.destroyAllWindows()
for row in csv.DictReader(data):
    value = str(row['Usage'])
    if value == 'PrivateTest':
        emotion = int(row['emotion'])
        labels_Pritest.append(emotion)

# garbage collection
gc.collect()

# loading images
test_img = np.zeros((len(labels_Pritest), 96, 96))
j = count = 0
show = []
for i in os.listdir(img_fold_path):
    test = np.squeeze(load_image(os.path.join(img_fold_path, i), grayscale=True)).astype('uint8')
    faces = detect_faces(face_detection, test)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        test2 = test[x1:x2, y1:y2]  # note: axes are swapped relative to the [y1:y2, x1:x2] crops used elsewhere
        try:
            test2 = transform.resize(test2, emotion_target_size)
        except Exception:
            continue
        test2 = np.expand_dims(test2, 0)
        test2 = np.expand_dims(test2, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(test2))
        if labels_Pritest[j] == emotion_label_arg:
            show.append(j)
            test_img[count] = test
def getFaceEmotion(image_path):
    # parameters for loading data and images
    # image_path = sys.argv[1]
    base = '/Users/macbookair/desktop/ms/project/components/emotion-classification'
    # base = '/Users/macbookair/desktop/MS/Project/components/emotion-classification'
    detection_model_path = base + '/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = base + '/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = base + '/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
        print(gender_text)
        print(emotion_text)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('predicted_test_image.png', bgr_image)
    return emotion_text
def get_sim_percent(image_1, image_2, target_class):
    # parameters for loading data and images
    # note: image_2 is currently unused in this function
    detection_model_path = 'trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
    gender_model_path = 'trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    target_emo = target_class
    image_path = image_1

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        # probability assigned to the target class, not an argmax
        emotion_label_arg = emotion_classifier.predict(gray_face)[0][target_emo]
        emotion_percent = str(round(emotion_label_arg * 100, 2))
        emotion_text = emotion_labels[target_emo] + ' - ' + emotion_percent
        # print(target_emo)
        # print(emotion_text)

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, -20, 30, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, 0, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('predicted_test_image.png', bgr_image)
    return emotion_percent
# hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
gender_offsets = (10, 10)
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, color_mode="rgb")
gray_image = load_image(image_path, color_mode="grayscale")
gray_image = np.squeeze(gray_image)
gray_image = gray_image.astype('uint8')

faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]
    try:
        rgb_face = cv2.resize(rgb_face, gender_target_size)
        gray_face = cv2.resize(gray_face, emotion_target_size)
    except cv2.error:  # the excerpt ends mid-try; this handler follows the pattern used throughout
        continue
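# The snippets in this collection call load_image() in two styles: the older
# grayscale=True/False keyword and the newer color_mode='rgb'/'grayscale' used
# just above (newer Keras deprecated the grayscale flag). A small compatibility
# shim, assuming the underlying helper takes the newer signature, lets legacy
# call sites keep working:
def load_image_compat(path, grayscale=None, color_mode='rgb'):
    if grayscale is not None:  # legacy keyword takes precedence when supplied
        color_mode = 'grayscale' if grayscale else 'rgb'
    return load_image(path, color_mode=color_mode)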
def exe_c(path):
    image_path = path  # assign the supplied image path
    # parameters for loading data and images
    # sys.argv[1]
    img_i = 1
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape
    # gender_offsets = (18, 25)
    gender_offsets = (10, 10)
    # emotion_offsets = (18, 25)
    emotion_offsets = (0, 0)

    # loading models: face_detection, emotion_classifier, gender_classifier
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)  # detect faces on the grayscale image
    Jsons = []
    for inx, face_coordinates in enumerate(faces):  # enumerate yields the per-person index inx
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        # jsonp['gender_text'] = gender_text  # add the gender tag to the per-person dict

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        # jsonp['emotion_text'] = emotion_text  # add the emotion tag to the per-person dict

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

        # convert the bounding-box list into a dict
        bound_key = ['x', 'y', 'w', 'h']
        c_x, c_y, c_w, c_h = face_coordinates
        bound_xy = [str(c_x), str(c_y), str(c_w), str(c_h)]
        bound_box = dict(zip(bound_key, bound_xy))
        print(gender_text)
        # append one dict per detected person
        Jsons.append({'person': inx, "gender": gender_text, "emotion": emotion_text, 'bound_box': bound_box})

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    img_i += 1
    cv2.imwrite('../images/upload/result/predict_img' + str(img_i) + '.png', bgr_image)  # write the annotated image with OpenCV
    # {'gender': gender_text, 'emotion': emotion_text}
    return Jsons
def recognition(f_2_s):
    # Python has no goto, so a flag is used to escape the nested conditionals
    tt = False
    emotion_c = ""
    # recap == False

    # parameters for loading data and images
    # image_path = path_r
    image_path = image_handler(f_2_s)
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameter for bounding box shape (an earlier (20, 40) value was overridden)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            # pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            # recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        # once an emotion is recognized, ask the user to confirm it, then save or bail out
        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?', title=emotion_text, buttons=['yes', 'no'])
        if tof == 'yes':
            tt = True
            emotion_c = emotion_text
            color = (255, 0, 0)  # red for the emotion text and the rectangle
            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)
        elif tof == 'no':
            tt = False
            break

    if tt:
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite('../src/' + emotion_text + '.jpg', bgr_image)
        check_recoged_img('../src/' + emotion_text + '.jpg')
    else:
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
def main():
    protoFile = "C:\\Users\\asus\\Documents\\major_project\\Project\\openpose-master\\openpose-master\\models\\pose\\coco\\pose_deploy_linevec.prototxt"
    weightsFile = "C:\\Users\\asus\\Documents\\major_project\\Project\\openpose-master\\openpose-master\\models\\pose\\coco\\pose_iter_440000.caffemodel"
    nPoints = 18

    # COCO output format
    keypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho',
                        'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip',
                        'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']

    POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8],
                  [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 0],
                  [0, 14], [14, 16], [0, 15], [15, 17], [2, 17], [5, 16]]

    # index of PAFs corresponding to the POSE_PAIRS,
    # e.g. for POSE_PAIR (1, 2) the PAFs are at output indices (31, 32); (1, 5) -> (39, 40), and so on
    mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
              [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
              [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
              [45, 46]]

    colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255],
              [0, 255, 255], [0, 100, 255], [0, 255, 0], [255, 200, 100],
              [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255],
              [0, 0, 255], [255, 0, 0], [200, 200, 0], [255, 0, 0],
              [200, 200, 0], [0, 0, 0]]

    detection_model_path = 'C:\\Users\\asus\\Documents\\major_project\\Project\\face_classification-master\\face_classification-master\\trained_models\\detection_models\\haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:\\Users\\asus\\Documents\\major_project\\Project\\face_classification-master\\face_classification-master\\trained_models\\emotion_models\\fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameter for bounding box shape (an earlier (20, 40) value was overridden)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    while True:
        poll()
        print(image_queue)

        # parameters for loading data and images
        image_name = image_queue.get()[0]
        print(image_name)
        image_path = get_image(image_name)  # sys.argv[1]

        # loading images
        rgb_image = load_image(image_path, grayscale=False)
        gray_image = load_image(image_path, grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        faces = detect_faces(face_detection, gray_image)
        cat_count = {'angry': 0, 'disgust': 0, 'fear': 0, 'happy': 0,
                     'sad': 0, 'surprise': 0, 'neutral': 0}
        total_count = 0
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            cat_count[emotion_text] += 1
            total_count += 1

            color = (255, 0, 0)
            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

        cv2.imwrite("../pose_images/" + image_name, rgb_image)
        upload_file("../pose_images/" + image_name, 'major-project-processed-images', image_name)

        connection = pymysql.connect(host, user=user, port=port, passwd=password, db=dbname)
        cursor = connection.cursor()
        cursor.execute(
            '''INSERT INTO `expressions`(`name`, `happy`, `angry`, `sad`, `suprised`, `fear`, `neutral`, `disgust`, `total`)
               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)''',
            (image_name, cat_count['happy'], cat_count['angry'], cat_count['sad'],
             cat_count['surprise'], cat_count['fear'], cat_count['neutral'],
             cat_count['disgust'], total_count))
        cursor.execute("commit")
        # bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        # cv2.imwrite('../images/predicted_test_image.png', bgr_image)

        # pose estimation code
        image1 = cv2.imread(image_path)
        frameWidth = image1.shape[1]
        frameHeight = image1.shape[0]
        # t = time.time()
        net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

        # fix the input height and compute the width from the aspect ratio
        inHeight = 368
        inWidth = int((inHeight / frameHeight) * frameWidth)
        inpBlob = cv2.dnn.blobFromImage(image1, 1.0 / 255, (inWidth, inHeight),
                                        (0, 0, 0), swapRB=False, crop=False)
        net.setInput(inpBlob)
        output = net.forward()
        # print("Time Taken in forward pass = {}".format(time.time() - t))

        detected_keypoints = []
        keypoints_list = np.zeros((0, 3))
        keypoint_id = 0
        threshold = 0.1
        keypoint_location = {}
        for part in range(nPoints):
            probMap = output[0, part, :, :]
            probMap = cv2.resize(probMap, (image1.shape[1], image1.shape[0]))
            keypoints = getKeypoints(probMap, threshold)
            print("Keypoints - {} : {}".format(keypointsMapping[part], keypoints))
            keypoints_with_id = []
            for i in range(len(keypoints)):
                keypoints_with_id.append(keypoints[i] + (keypoint_id,))
                keypoints_list = np.vstack([keypoints_list, keypoints[i]])
                keypoint_location[keypoint_id] = keypoints[i]
                keypoint_id += 1
            detected_keypoints.append(keypoints_with_id)

        frameClone = image1.copy()
        for i in range(nPoints):
            for j in range(len(detected_keypoints[i])):
                cv2.circle(frameClone, detected_keypoints[i][j][0:2], 5, colors[i], -1, cv2.LINE_AA)
        # cv2.imshow("Keypoints", frameClone)

        valid_pairs, invalid_pairs = getValidPairs(output, frameWidth, frameHeight,
                                                   mapIdx, detected_keypoints, POSE_PAIRS)
        personwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs,
                                                     keypoints_list, mapIdx, POSE_PAIRS)

        for i in range(17):
            for n in range(len(personwiseKeypoints)):
                index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
                if -1 in index:
                    continue
                B = np.int32(keypoints_list[index.astype(int), 0])
                A = np.int32(keypoints_list[index.astype(int), 1])
                cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), colors[i], 3, cv2.LINE_AA)

        cv2.imwrite("../pose_images/image0.jpg", frameClone)
        class_status = classStatus(personwiseKeypoints, keypoint_location)
        print(class_status)
        cursor.execute(
            '''INSERT INTO `gestures`(`name`, `left_turned`, `right_turned`, `back_turned`, `raised_hands`, `total`)
               VALUES (%s, %s, %s, %s, %s, %s)''',
            (image_name, class_status['turnedleft'], class_status['turnedright'],
             class_status['turnedback'], class_status['raisedhands'], class_status['classtotal']))
        cursor.execute("commit")
        # end of pose estimation code

        cursor.execute('''UPDATE `images` SET `isprocessed`=1 WHERE `name`=%s''', (image_name))
        cursor.execute("commit")
        cursor.close()
        connection.close()
def predict(image_folder_path, emotion_kind):
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    target_file = '../result/predicted_' + EmotionName[emotion_kind] + '.txt'
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box shape (earlier, larger offsets were overridden)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    image_path = dir_data_folder(image_folder_path)
    predicted_label = []
    for num in range(len(image_path)):
        # print the progress info
        if print_switch == 0:
            print('deal with the ' + image_path[num])

        # loading images
        rgb_image = load_image(image_path[num], grayscale=False)
        gray_image = load_image(image_path[num], grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')
        # pdb.set_trace()

        faces = detect_faces(face_detection, gray_image)
        emotion_label_arg = -1  # -1 marks "no face recognized" in the predicted labels
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if single_set_draw_switch == 0:
                if gender_text == gender_labels[0]:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)
                draw_bounding_box(face_coordinates, rgb_image, color)
                # draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
                draw_text(face_coordinates, rgb_image, emotion_text, color, 0, 20, 1, 2)

        if single_set_draw_switch == 0:
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            filename = image_path[num].split('/')[-1]
            filename = filename.split('.')[0]
            newname = '../predicted_image/' + 'predicted_' + filename + '.png'
            cv2.imwrite(newname, bgr_image)

        # record the predicted label for this image
        predicted_label.append(emotion_label_arg)

    unrecognized_cnt = predicted_label.count(-1)
    true_cnt = predicted_label.count(emotion_kind)
    total_cnt = len(image_path)
    face_cnt = total_cnt - unrecognized_cnt
    face_recog_ratio = face_cnt / float(total_cnt)
    total_accuracy = true_cnt / float(total_cnt)
    recog_accuracy = true_cnt / float(face_cnt)

    f = open(target_file, 'w+')
    # with open(target_file, 'w') as f:
    f.write(EmotionName[emotion_kind] + ' count = ' + str(true_cnt) + '\n')
    f.write(EmotionName[emotion_kind] + ' total count = ' + str(total_cnt) + '\n')
    f.write(EmotionName[emotion_kind] + '_accuracy in total: ' + str(total_accuracy) + '\n')
    f.write(EmotionName[emotion_kind] + '_accuracy in recognized: ' + str(recog_accuracy) + '\n')
    f.write('face_recognize_count : ' + str(face_cnt) + '\n')
    f.write('face_recognize_ratio : ' + str(face_recog_ratio) + '\n')
    for label in predicted_label:
        f.write(str(label) + '\n')
    f.close()

    return [true_cnt, total_cnt, face_cnt, total_accuracy, face_recog_ratio, recog_accuracy]
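# Worked example for the ratios above (hypothetical numbers): with 100 input
# images, 8 with no detected face, and 70 predicted as the target emotion:
#   face_cnt         = 100 - 8   = 92
#   face_recog_ratio = 92 / 100  = 0.92
#   total_accuracy   = 70 / 100  = 0.70
#   recog_accuracy   = 70 / 92   ~= 0.76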
"""RAF_DB""" emotion_model_path = '../trained_models/emotion_models/VGG16_Dense_RAF_20190714.h5' # 选择加载模型 emotion_labels = [ 'Surprise', 'Fear', 'Disgust', 'Happy', 'Sad', 'Angry', 'Neutral' ] """FER2013""" # emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] # emotion_model_path = '../trained_models/emotion_models/fer2013_vgg16_tf.h5' isgray = False detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml' face_detection = cv2.CascadeClassifier(detection_model_path) emotion_classifier = load_model(emotion_model_path, compile=True) emotion_target_size = emotion_classifier.input_shape[1:3] rgb_image = load_image(image_path, color_mode='rgb') gray_image = load_image(image_path, color_mode='grayscale') if isgray: # gray_image = rgb2gray(rgb_image) gray_image = np.squeeze(gray_image) gray_image = gray_image.astype('uint8') face_image = gray_image else: rgb_image = np.squeeze(rgb_image).astype('uint8') face_image = rgb_image emotion_offsets = (0, 0) # face = gray_image faces = detect_faces(face_detection, face_image)
# hyper-parameter for bounding box shape (an earlier (20, 40) value was overridden)
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model()
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]

start = time.time()

# loading images
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image)
gray_image = gray_image.astype('uint8')

# this detector also returns scores and indices (dlib-style API)
detected_faces, score, idx = detect_faces(face_detection, gray_image)
for detected_face in detected_faces:
    face_coordinates = make_face_coordinates(detected_face)
    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]
def gender_detection(image_path):
    # parameters for loading data and images
    detection_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\haarcascade_frontalface_default.xml'
    gender_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\simple_CNN.81-0.96.hdf5'
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameter for bounding box shape (an earlier (30, 60) value was overridden)
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    # create a scratch file that collects one gender label per detected face
    save_path = 'C:\\Users\\l1f15bscs0049\\Desktop'
    completeName = os.path.join(save_path, "hellojee.txt")
    file = open(completeName, "a")

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        file.write(str(gender_label_arg))
        file.write("\n")

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    # cv2.imwrite('C:\\Users\\l1f15bscs0049\\Desktop\\a.png', bgr_image)
    print('\n\tGender Detection Done')
    file.close()

    # count the women/men votes in the file (label 0 = woman, 1 = man in the imdb labels)
    from collections import Counter
    with open(completeName, "r") as f:
        cd = Counter(int(line.split(None, 1)[0]) for line in f)
    women_count = cd[0]
    men_count = cd[1]

    os.remove(completeName)
    print("file removed")

    # call a wrapper function depending on the majority gender
    if women_count > men_count:
        print("Women detected")
        Wrapper_func(0)
    elif men_count > women_count:
        print("men detected")
        Wrapper_func(1)
    else:
        print("no Detection\n Random Ad's playing\n")
        random_ads()
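# Hypothetical driver: classify a single frame and trigger the matching ad flow.
if __name__ == '__main__':
    gender_detection('C:\\Users\\l1f15bscs0049\\Desktop\\frame.jpg')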