Example #1
 def loadLearntFaces(self):
     path = self.FACE_FOLDER
     if os.path.exists(path):
         fileList = os.listdir(path)
         for file in fileList:
             label = os.path.splitext(file)[0]
             if label in self.faceList:
                 rospy.logwarn("DUPICATE FACE LABEL file_name:" + file +
                               ", label:" + str(label))
             if (file != '.gitkeep'):
                 rospy.loginfo("file_name:" + file + ", label:" +
                               str(label))
                 #print("check --------------->:"+str(path+file))
                 face_image = face_recognition.load_image_file(path + "/" +
                                                               file)
                 face_recognition_list = face_recognition.face_encodings(
                     face_image)
                 rospy.loginfo(len(face_recognition_list))
                 if len(face_recognition_list) > 0:
                     face_encoding = face_recognition_list[0]
                     current_face = Face.Face(0, 0, 0, 0, label, 0)
                     current_face.encode(face_encoding)
                     self.faceList[label] = current_face
     else:
         rospy.logerr(
             "Unable to load face references, no such directory: " +
             str(path))
         return

 def _processLearnFace(self, top, right, bottom, left, face_encoding,
                       label_tmp, frame, new_learnt_face):
     # save the cropped face box to the learn directory
     cv2.imwrite(self.FACE_FOLDER_AUTO + "/" + label_tmp + ".png",
                 frame[top:bottom, left:right])
     new_face = Face.Face(0, 0, 0, 0, label_tmp, 0)
     new_face.encode(face_encoding)
     self.faceList[label_tmp] = new_face
     new_learnt_face.append(new_face)
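
loadLearntFaces builds the in-memory gallery by reading every image in FACE_FOLDER and using the file name (without its extension) as the label. Below is a minimal standalone sketch of that convention, assuming the face_recognition package is installed; the folder path and file names are made up:

    import os
    import face_recognition

    FACE_FOLDER = "/tmp/faces"  # hypothetical gallery: one image per person, e.g. alice.png

    if os.path.exists(FACE_FOLDER):
        for file_name in os.listdir(FACE_FOLDER):
            if file_name == '.gitkeep':
                continue
            label = os.path.splitext(file_name)[0]  # "alice.png" -> label "alice"
            image = face_recognition.load_image_file(os.path.join(FACE_FOLDER, file_name))
            encodings = face_recognition.face_encodings(image)
            if encodings:
                print("label %s -> 128-d encoding ready" % label)
            else:
                print("no face found in %s, it would be skipped" % file_name)

Only the first encoding per image is kept by the loader above, so gallery images should contain exactly one face.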
Example #3
 def _processLearnFace(self, top, right, bottom, left, face_encoding,
                       label_tmp, frame, new_learnt_face):
     #self.mutex.acquire()
     if self.mutex.acquire(False):
         try:
             # save the cropped face box to the learn directory
             cv2.imwrite(self.FACE_FOLDER_AUTO + "/" + label_tmp + ".png",
                         frame[top:bottom, left:right])
             new_face = Face.Face(0, 0, 0, 0, label_tmp)
             new_face.encode(face_encoding)
             self.faceList[label_tmp] = new_face
             new_learnt_face.append(new_face)
         finally:
             self.mutex.release()
     else:
         rospy.loginfo(
             " lock is already aquired, learning is already in progress")
Example #4
    def process_img_face(self, data, name_w, current_status, isImgFace):
        detected_faces_list = []
        new_learnt_face = []
        face_locations = []
        label_r = 'NONE'
        if (current_status == None):
            current_status = self.STATUS
        try:
            # Convert the image to a numpy array
            frame = self._bridge.imgmsg_to_cv2(data, 'bgr8')
            frame_copy = self._bridge.imgmsg_to_cv2(data, 'bgr8')

            if not isImgFace:
                if self.face_detection_mode == 2:
                    face_locations = face_recognition.face_locations(
                        frame, number_of_times_to_upsample=0, model="cnn")
                elif self.face_detection_mode == 1:
                    face_locations = face_recognition.face_locations(frame)
                else:
                    face_locations = self.cascadeHaarFaceDetection.processImg(
                        frame)
                    #rospy.logwarn(face_locations)
            else:
                face_locations = []
                face_locations.append((long(0), long(0 + frame.shape[0]),
                                       long(0 + frame.shape[1]), long(0)))

            i = 0
            for location in face_locations:
                i = i + 1

            if i == 0:
                rospy.logdebug("NO FACE DETECTED ON THE CURRENT IMG")

            #if isImgFace:
            #    face_encodings = face_recognition.face_encodings(frame)
            #else:
            face_encodings = face_recognition.face_encodings(
                frame, face_locations)

            # Find all the faces and face encodings in the frame of video
            top_r = 0
            bottom_r = 0
            left_r = 0
            right_r = 0

            # Loop through each face in this frame of video
            for (top, right, bottom,
                 left), face_encoding in zip(face_locations, face_encodings):
                # See if the face is a match for the known face(s)

                name, distance = self._processDetectFace(
                    top, right, bottom, left, face_encoding)
                current_face = Face.Face(top, left, bottom, right, name,
                                         distance)
                detected_faces_list.append(current_face)

                rospy.logdebug("STATUS: " + current_status)
                if (current_status == self.LEARNING_STATUS
                        and name == "Unknown") or (
                            self.continuous_learn and name == "Unknown") or (
                                current_status == self.FORCE_LEARNING_STATUS):

                    label_tmp = str(uuid.uuid1())
                    rospy.loginfo("unknown face: launch learn operation")
                    self._processLearnFace(top, right, bottom, left,
                                           face_encoding, label_tmp,
                                           frame_copy, new_learnt_face)
                label_r = name
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255),
                              2)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                              (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame,
                            str(round(distance * 100, 1)) + "% " + name,
                            (left + 6, bottom - 6), font, 0.8, (255, 255, 255),
                            1)

                x0, y0 = self._processBoxCenter(top, right, bottom, left)
                cv2.circle(frame, (x0, y0), 5, (0, 255, 0), cv2.FILLED)
                top_r = top
                bottom_r = bottom
                left_r = left
                right_r = right
                #return frame,name,top,left,bottom,right

            #update label
            if name_w == None:
                _labelToLearn = self.labelToLearn
            else:
                _labelToLearn = name_w

            #check the biggest face learnt
            if not self.continuous_learn:
                max_box = 0
                biggest_face = None
                for f in new_learnt_face:
                    if max_box < f.size:
                        max_box = f.size
                        biggest_face = f
                if len(new_learnt_face) > 0:
                    self.STATUS = 'LEARNT'
                    oldId = biggest_face.label
                    del self.faceList[oldId]
                    biggest_face.label = _labelToLearn
                    self.faceList[_labelToLearn] = biggest_face
                    rospy.loginfo("")
                    os.rename(
                        self.FACE_FOLDER_AUTO + "/" + oldId + '.png',
                        self.FACE_FOLDER_AUTO + "/" + _labelToLearn + '.png')
                    rospy.loginfo("BIGGEST FACE of " +
                                  str(len(new_learnt_face)) + ":" +
                                  biggest_face.label)

            return frame, label_r, top_r, left_r, bottom_r, right_r, detected_faces_list
        except CvBridgeError as e:
            rospy.logwarn(e)
            return "no Value"
    def process_img_face(self, data, name_w, current_status, isImgFace):
            #self.frames = self.frames + 1
            detected_faces_list=[]
            new_learnt_face=[]
            face_locations=[]
            label_r='NONE'
            if(current_status== None):
                current_status=self.STATUS
            try:
                # Convert the image to a numpy array
                frame = self._bridge.imgmsg_to_cv2(data, 'bgr8')
                frame_copy = self._bridge.imgmsg_to_cv2(data, 'bgr8')

                if not isImgFace:
                    if self.face_detection_mode==2:
                        face_locations = face_recognition.face_locations(frame, number_of_times_to_upsample=0, model="cnn")
                        #print(face_locations)
                    elif self.face_detection_mode==1:
                        face_locations = face_recognition.face_locations(frame)
                    else:
                        face_locations= self.cascadeHaarFaceDetection.processImg(frame)
                        #rospy.logwarn(face_locations)
                else:
                    face_locations=[]
                    face_locations.append((long(0), long(0 + frame.shape[0]), long(0 + frame.shape[1]), long(0)))

                i=0
                for location in face_locations:
                    i=i+1

                if i==0:
                    rospy.logdebug("NO FACE DETECTED ON THE CURRENT IMG")


                #if isImgFace:
                #    face_encodings = face_recognition.face_encodings(frame)
                #else:
                face_encodings = face_recognition.face_encodings(frame, face_locations)

                # Find all the faces and face encodings in the frame of video
                top_r=0
                bottom_r=0
                left_r=0
                right_r=0
                r_count = 0

                person_detections = []  # store all detections and choose the one with the highest confidence
                # Loop through each face in this frame of video
                for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                    # See if the face is a match for the known face(s)

                    name,distance = self._processDetectFace(top, right, bottom, left,face_encoding)
                    current_face=Face.Face(top,left,bottom,right,name,distance)
                    detected_faces_list.append(current_face)
                    #self.frames = self.frames + 1
                    rospy.logdebug("STATUS: "+current_status)
                    if (current_status==self.LEARNING_STATUS and name == "Unknown") or (self.continuous_learn and name == "Unknown") or (current_status==self.FORCE_LEARNING_STATUS):

                        label_tmp=str(uuid.uuid1())
                        rospy.loginfo("unknown face: launch learn operation")
                        self._processLearnFace(top, right, bottom, left,face_encoding,label_tmp,frame_copy,new_learnt_face)
                    label_r=name
                    # Draw a box around the face
                    #cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                    # Draw a label with a name below the face
                    #cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    x0, y0 = self._processBoxCenter(top, right, bottom, left)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    cv2.putText(frame, str(round(distance*100,1))+"% "+name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)
                    cv2.circle(frame, (x0, y0), 5, (0, 255, 0), cv2.FILLED)
                    #print(name[0:8],left,top,right,bottom,str(round(distance*100,1)))
                    if(name[0:8]=="ruthrash"):
                        # keep the confidence numeric so the max-confidence comparison below works
                        person_detections.append((left, top, right, bottom, round(distance*100, 1)))
                        r_count = r_count+1
                        if(r_count>1):
                            self.fp_count = self.fp_count+1
                        self.count = self.count+1

                    #cv2.putText(frame, str(round(distance*100,1))+"% "+name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)

                    #x0, y0 =self._processBoxCenter(top, right, bottom, left)
                    #cv2.circle(frame, (x0, y0), 5, (0, 255, 0), cv2.FILLED)
                    top_r=top
                    bottom_r=bottom
                    left_r=left
                    right_r=right
                    #return frame,name,top,left,bottom,right
                max_conf = 0
                #print(str(text))
                #choose person detection with highest confidence
                #'''
                for i in range(len(person_detections)):
                    if(person_detections[i][4]>=max_conf):
                        max_conf = person_detections[i][4]
                        self.face_location = person_detections[i][0:4]

                ##self.face_location
                poses = []
                #print(self.face_location)

                if len(person_detections)>=1:
                    self.detected_person_flag = True
                    x0, y0 = self._processBoxCenter(self.face_location[1], self.face_location[2], self.face_location[3], self.face_location[0])
                    cv2.rectangle(frame, (self.face_location[0], self.face_location[1]), (self.face_location[2], self.face_location[3]), (0, 0, 255), 2)
                    cv2.rectangle(frame, (self.face_location[0], self.face_location[3] - 35), (self.face_location[2], self.face_location[3]), (0, 0, 255), cv2.FILLED)
                    cv2.putText(frame, str(round(distance*100,1))+"% "+'ruthrash', (self.face_location[0] + 6, self.face_location[3] - 6), font, 0.8, (255, 255, 255), 1)
                    cv2.circle(frame, (x0, y0), 10, (0, 255, 0), cv2.FILLED)
                    #poses =
                    min_dist = 100
                    min_x0 = 0
                    min_y0 = 0
                    if self.poses_flag==True:
                        for person in self.pose_detections.persons:
                            pose = []
                            for bodyPart in person.bodyParts:
                                pose.append(bodyPart.pixel)
                            #print(pose[0] ,(x0,y0))
                            #print(np.linalg.norm(np.array((pose[0].x,pose[0].y)) - np.array((x0,y0))))
                            dist = np.linalg.norm(np.array((pose[0].x,pose[0].y)) - np.array((x0,y0)))
                            if(min_dist > dist):
                                min_dist = dist
                                index = len(poses)
                                min_x0 = x0
                                min_y0 = y0

                            poses.append((pose))

                        #cv2.circle(frame, (min_x0, min_y0), 10, (0, 255, 0), cv2.FILLED)
                        self.neck = [int(poses[index][1].x), int(poses[index][1].y)]
                        cv2.circle(frame, (int(poses[index][0].x), int(poses[index][0].y)), 10, (0, 255, 255), cv2.FILLED)
                    #print(len(poses))
                # '''
                if(self.neck[0]!=0 and self.neck[1]!=0):
                    cv2.circle(frame, (int(self.neck[0]), int(self.neck[1])), 10, (255, 0, 0), cv2.FILLED)
                    cv2.putText(frame, 'ruthrash', (int(self.neck[0] + 6), int(self.neck[1]-6)), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)
                    self.track = 0

                #print((poses[index][0].x,poses[index][0].y),(min_x0,min_y0))
                #update label
                if name_w== None:
                    _labelToLearn=self.labelToLearn
                else:
                    _labelToLearn=name_w

                #check the biggest face learnt
                if not self.continuous_learn:
                    max_box=0
                    biggest_face=None
                    for f in new_learnt_face:
                        if max_box<f.size:
                            max_box=f.size
                            biggest_face=f
                    if len(new_learnt_face)>0:
                        self.STATUS='LEARNT'
                        oldId=biggest_face.label
                        del self.faceList[oldId]
                        biggest_face.label=_labelToLearn
                        self.faceList[_labelToLearn]=biggest_face
                        rospy.loginfo("")
                        os.rename(self.FACE_FOLDER_AUTO+"/"+oldId+'.png', self.FACE_FOLDER_AUTO+"/"+_labelToLearn+'.png')
                        rospy.loginfo("BIGGEST FACE of "+str(len(new_learnt_face))+":"+biggest_face.label)


		print("no.of frames, detections_count ("+str(self.frames)+","+str(self.count)+","+str(self.fp_count)+")")
                return frame,label_r,top_r,left_r,bottom_r,right_r,detected_faces_list
            except CvBridgeError as e:
                    rospy.logwarn(e)
                    return "no Value"