def callback(self, imgmsg):
    # Load the Haar cascade for frontal face detection.
    # (alternative local path: './cascades/haarcascade_frontalface_alt.xml')
    face_cascade = cv2.CascadeClassifier(
        '/opt/ros/kinetic/share/OpenCV-3.3.1-dev/haarcascades/haarcascade_frontalface_alt.xml'
    )

    # Convert the incoming ROS image message to an OpenCV BGR frame.
    frame = self.bridge.imgmsg_to_cv2(imgmsg, "bgr8")

    # Detect faces and collect bounding boxes as (x, y, w, h) tuples.
    faces = face_cascade.detectMultiScale(frame, scaleFactor=1.1, minNeighbors=5)
    bbox_list = [(x, y, w, h) for (x, y, w, h) in faces]

    # Classify the facial expression inside each bounding box.
    fexpr_list = r.classify_image(self.emoC1.sess, self.emoC1.image,
                                  self.emoC1.processed_image,
                                  self.emoC1.processed_images,
                                  self.emoC1.probs, self.emoC1.logger,
                                  frame, bbox_list)

    for idx, bbox in enumerate(bbox_list):
        xx, yy, wd, ht = bbox
        pos = (xx, yy)
        answer = fexpr_list[idx][0]          # predicted class index
        answer1 = str(fexpr_list[idx][1])    # confidence score
        answer2 = ":" + str(bbox_list[idx])  # bounding box of this face

        rospy.logout("--------------- Detecting Face Loop Starting ---------------")
        if answer == 0:
            rospy.loginfo("----------- Anger :" + answer1 + answer2)
            self._pub1.publish("Anger :" + answer1 + answer2)
        elif answer == 1:
            rospy.loginfo("--------- Happy :" + answer1 + answer2)
            self._pub1.publish("Happy :" + answer1 + answer2)
        elif answer == 2:
            rospy.loginfo("-------- Neutral :" + answer1 + answer2)
            self._pub1.publish("Neutral :" + answer1 + answer2)
        elif answer == 3:
            rospy.loginfo("-------- Sad :" + answer1 + answer2)
            self._pub1.publish("Sad :" + answer1 + answer2)
        else:
            rospy.loginfo("---------------- unknown emotion :" + answer1 + answer2)
            self._pub1.publish("unknown emotion :" + answer1 + answer2)

        # Draw the predicted label and the face bounding box on the frame.
        r.draw_label(frame, answer, pos, wd, ht)
        cv2.rectangle(frame, pos, (xx + wd, yy + ht), (0, 0, 255), 2)

        # Every 60 seconds, append the latest face-type statistic to a local
        # JSON report (consumed externally, e.g. via a crontab job).
        self.newTime = int(time())
        if self.newTime - self.oldTime > 60:
            ans1 = int(float(answer1) * 100)
            write_json_file('client_faceType_emotion.json',
                            face_RepoFormat(answer, ans1))
            self.oldTime = int(time())

    # Publish the annotated frame on the output image topic.
    msg = self.bridge.cv2_to_imgmsg(frame, "bgr8")
    self._pub.publish(msg)
    print("--------------- Detecting Loop Ending .............................")
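# ---------------------------------------------------------------------------
# Minimal sketch of the node wiring the callback above relies on: `self` must
# carry a CvBridge, an EmotionClassifier, two publishers, and a report timer.
# This class is an illustrative assumption; the class name, topic names, and
# queue sizes are not from the original source.
# ---------------------------------------------------------------------------
import rospy
from time import time
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from std_msgs.msg import String


class FaceEmotionNode(object):
    # Reuse the callback defined above as this class's image handler.
    callback = callback

    def __init__(self):
        self.bridge = CvBridge()            # ROS <-> OpenCV image conversion
        self.emoC1 = EmotionClassifier()    # exposes sess, image, probs, logger, ...
        self.oldTime = int(time())          # throttles the JSON report writes
        self._pub = rospy.Publisher('face_emotion/image', Image, queue_size=1)    # assumed topic
        self._pub1 = rospy.Publisher('face_emotion/label', String, queue_size=1)  # assumed topic
        rospy.Subscriber('camera/image_raw', Image, self.callback, queue_size=1)  # assumed topic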
import cv2
if __name__ == "__main__":
    emoC1 = EmotionClassifier()

    # Read a single test frame (a still image opened through VideoCapture).
    ret, frame = cv2.VideoCapture('data/456.jpg').read()

    # Hard-coded face bounding boxes (x, y, w, h) for the test image.
    bbox_list = [(1096, 618, 26, 32), (814, 550, 35, 33), (998, 145, 65, 87),
                 (65, 142, 67, 77), (313, 75, 73, 95)]

    fexpr_list = r.classify_image(emoC1.sess, emoC1.image, emoC1.processed_image,
                                  emoC1.processed_images, emoC1.probs,
                                  emoC1.logger, frame, bbox_list)
    print(fexpr_list)

    # Draw the predicted expression label on each face.
    for idx, bbox in enumerate(bbox_list):
        pos = (bbox[0], bbox[1])
        wd = bbox[2]
        ht = bbox[3]
        fexpr = fexpr_list[idx][0]
        r.draw_label(frame, fexpr, pos, wd, ht)
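    # Hedged addition (not in the original excerpt): save the annotated test
    # frame so the drawn labels can be inspected; the output path is an
    # illustrative assumption.
    cv2.imwrite('data/456_labeled.jpg', frame)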
print("---Begin time :", Begin_localtime) fd.write("\n" + "Begin time :" + Begin_localtime) while (cap.isOpened()): # if(count <1): #count= count + 1 ret, frame = cap.read() #frame = cv2.imread('data/789.jpg') gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # you should adjust detectMultiScale parameter to detect faces faces = face_cascade.detectMultiScale(gray, 1.05, 5) if (len(faces) > 0): bbox_list = [(x, y, w, h) for (x, y, w, h) in faces] #--------------------------------- # for f_idx in range(len(frames)): fexpr_list = r.classify_image( sess, image, processed_image, processed_images, probs, logger, frame, bbox_list) #frames[f_idx], bbox_lists[f_idx]) #for b_idx in range(len(bbox_lists[f_idx])): for b_idx in range( len(bbox_list)): #b_idx #range( len(bbox_list) ): #faces bbox = bbox_list[b_idx] pos = (bbox[0], bbox[1]) wd = bbox[2] ht = bbox[3] #fexpr = fexpr_list[b_idx][0] fexpr = fexpr_list[b_idx][0] #r.draw_label(frames[f_idx], fexpr, pos, wd, ht) r.draw_label(frame, fexpr, pos, wd, ht) #cv2.imshow('image',frames[f_idx]) # red text