def test():
    """Evaluate the saved emotion model on the test set.

    Loads the test split, runs the network once over all test images, then
    for each of the 7 emotion classes computes one-vs-rest binary metrics
    (recall, precision, Jaccard similarity, F1, accuracy) and prints them.
    """
    images_testing, labels_testing = loadDate('./emotionDataset/test')
    network = EmotionRecognition()
    network.load_model2()
    print('[+] Testing load model')
    result = network.predict(images_testing)

    for label in range(7):
        # One-vs-rest binarization: 1 if the sample is/was predicted as
        # this class, 0 otherwise.
        y_true = []
        y_pred = []
        for scores, gt_onehot in zip(result, labels_testing):
            # argmax returns the first index of the max value, matching the
            # original list(x).index(np.max(x)) idiom.
            pre = int(np.argmax(scores))
            gt = int(np.argmax(gt_onehot))
            y_true.append(int(gt == label))
            y_pred.append(int(pre == label))

        recall = recall_score(y_true, y_pred)
        precision = precision_score(y_true, y_pred)
        accuracy = accuracy_score(y_true, y_pred)
        f1_measure = f1_score(y_true, y_pred)
        # NOTE(review): jaccard_similarity_score was deprecated in
        # scikit-learn 0.21 and removed in 0.23 (renamed jaccard_score);
        # kept as-is to match this file's imports — confirm sklearn version.
        similarity = jaccard_similarity_score(y_true, y_pred)

        print('###label :%d' % label)
        print(
            'recall:%s precision:%s similarity:%s F1_measure:%s accuracy:%s'
            % (recall, precision, similarity, f1_measure, accuracy))
        print(
            "========================================================================="
        )
def detect():
    """Run live webcam emotion detection with two-stage majority voting.

    Every frame is classified; every 60 classified frames the majority
    emotion is recorded (stage 1), and after 7 such majorities the overall
    winner is chosen (stage 2): its emoji is overlaid on the frame, a random
    response MP3 for that emotion is played, and the result is stored in
    MongoDB. Press 'q' to quit.
    """
    video_capture = cv2.VideoCapture(0)

    # Pre-load one emoji image (with alpha channel, hence -1) per emotion.
    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))

    voting_1 = []   # per-frame predictions within the current 60-frame window
    cal_1 = []      # per-class counts for the current window
    voting_2 = []   # window-level majority winners
    cal_2 = []      # per-class counts over window winners
    count = 0       # number of frames successfully classified so far
    frame_idx = 0   # total frames captured, used to name saved snapshots

    network = EmotionRecognition()
    network.load_model2()
    print('[+] Testing load model')

    while True:
        # Capture frame-by-frame.
        ret, frame = video_capture.read()
        # BUG FIX: the original passed the literal filename "%05d.jpg" so every
        # frame overwrote one file; now each frame gets a numbered name.
        cv2.imwrite("%05d.jpg" % frame_idx, frame)
        frame_idx += 1

        # Predict result with network (None when no face was found).
        result = network.predict(format_image(frame))

        if result is not None:
            # Draw the per-emotion probability bars and labels on the frame.
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)

            count += 1
            predicted = result[0].tolist().index(max(result[0]))
            print(predicted)
            voting_1.append(predicted)

            # Stage 1: every 60 classified frames, record the majority class.
            if len(voting_1) == 60:
                for i in range(7):
                    cal_1.append(voting_1.count(i))
                maximum_face_times = np.max(cal_1)
                maximum_face = cal_1.index(maximum_face_times)
                voting_2.append(maximum_face)
                voting_1.clear()
                cal_1.clear()

            # Stage 2: after 7 window winners, pick the overall majority.
            if len(voting_2) == 7:
                for i in range(7):
                    cal_2.append(voting_2.count(i))
                maximum_face_times_2 = np.max(cal_2)
                maximum_face_2 = cal_2.index(maximum_face_times_2)
                print("Voting 結果︰第", maximum_face_2, "類")
                face_image = feelings_faces[maximum_face_2]

                # Alpha-blend the winning emoji onto the frame.
                for c in range(0, 3):
                    frame[200:320, 10:130, c] = face_image[:, :, c] * (
                        face_image[:, :, 3] / 255.0) + frame[
                            200:320, 10:130, c] * (
                                1.0 - face_image[:, :, 3] / 255.0)
                voting_2.clear()
                cal_2.clear()

                # Play a random response clip for the detected emotion.
                from pygame import mixer
                import random
                mixer.init()
                mixer.music.load(
                    join('./response/', str(maximum_face_2)) + '/' +
                    str(random.randint(0, 3)) + '.mp3')
                mixer.music.play()

                insert_mongo(int(maximum_face_2))

            # Keep showing the last winner's emoji once voting has completed
            # at least once (420 = 60 frames * 7 windows).
            if count >= 420:
                for c in range(0, 3):
                    frame[200:320, 10:130, c] = face_image[:, :, c] * (
                        face_image[:, :, 3] / 255.0) + frame[
                            200:320, 10:130, c] * (
                                1.0 - face_image[:, :, 3] / 255.0)

        # Display the resulting frame.
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()