def __init__(self):
    """Class constructor."""
    # Instance of the face detector.
    self._faceDet = FaceDetector()
    # Instance of the bank of Gabor filters.
    self._bank = GaborBank()
    # Instance of the emotions detector.
    self._emotionsDet = EmotionsDetector()
    # Data of the last face detected.
    self._face = FaceData()
    # Data of the last emotions detected.
    self._emotions = OrderedDict()
def __init__(self):
    """Initialize the detectors and the storage for the last results."""
    self._faceDet = FaceDetector()          # face detector instance
    self._bank = GaborBank()                # Gabor filter bank instance
    self._emotionsDet = EmotionsDetector()  # emotions detector instance
    self._face = FaceData()                 # data of the last face detected
    self._emotions = OrderedDict()          # data of the last emotions detected
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """
        self._faceDet = FaceDetector()          # face detector instance
        self._bank = GaborBank()                # bank of Gabor filters
        self._emotionsDet = EmotionsDetector()  # emotions detector instance
        self._face = FaceData()                 # data of the last face detected
        self._emotions = OrderedDict()          # data of the last emotions detected

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to perform the detections from.

        Returns
        -------
        ret: bool
            Indication of success or failure.
        """
        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face

            # Crop just the face region
            frame, face = face.crop(frame)

            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)

            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)
            return True
        else:
            self._face = None
            return False

    #-----------------------------------------
    def draw(self, frame):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw the information to.
        """
        # Font settings
        font = cv2.FONT_HERSHEY_SIMPLEX
        scale = 0.5
        thick = 1
        glow = 3 * thick

        # Color settings (BGR)
        black = (0, 0, 0)
        white = (255, 255, 255)
        yellow = (0, 255, 255)
        red = (0, 0, 255)

        empty = True

        # Plot the face landmarks and face distance
        x = 5
        y = 0
        w = int(frame.shape[1] * 0.2)
        try:
            face = self._face
            empty = face.isEmpty()
            face.draw(frame)
        except Exception:
            # Best-effort: self._face may be None when no face was detected.
            # (Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass

        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:
                labels = list(emotions.keys())
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]

                # Persist the most probable emotion for external consumers.
                # A context manager guarantees the handle is closed even if
                # the write raises (the original leaked it in that case).
                with open("foo.txt", "wb") as fo:
                    fo.write(bytes(bigger, 'UTF-8'))
                print(bigger)

            # Draw the header
            text = 'emotions'
            size, _ = cv2.getTextSize(text, font, scale, thick)
            y += size[1] + 20
            cv2.putText(frame, text, (x, y), font, scale, black, glow)
            cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
            y += 5
            cv2.line(frame, (x, y), (x + w, y), black, 1)

            # 'happiness' is used to size the label column (widest label).
            size, _ = cv2.getTextSize('happiness', font, scale, thick)
            t = size[0] + 20
            w = 150
            h = size[1]
            for l, v in zip(labels, values):
                lab = '{}:'.format(l)
                val = '{:.2f}'.format(v)
                size, _ = cv2.getTextSize(l, font, scale, thick)

                # Set a red color for the emotion with bigger probability
                color = red if l == bigger else yellow

                y += size[1] + 15

                p1 = (x + t, y - size[1] - 5)
                p2 = (x + t + w, y - size[1] + h + 5)
                cv2.rectangle(frame, p1, p2, black, 1)

                # Draw the filled rectangle proportional to the probability
                p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
                cv2.rectangle(frame, p1, p2, color, -1)
                cv2.rectangle(frame, p1, p2, black, 1)

                # Draw the emotion label
                cv2.putText(frame, lab, (x, y), font, scale, black, glow)
                cv2.putText(frame, lab, (x, y), font, scale, color, thick)

                # Draw the value of the emotion probability
                cv2.putText(frame, val, (x + t + 5, y), font, scale, black, glow)
                cv2.putText(frame, val, (x + t + 5, y), font, scale, white, thick)
        except Exception as e:
            print(e)
class VideoData:
    # Helper class that presents the detected face region, landmarks and
    # emotions, and keeps per-emotion winner counters in module-level globals.
    def __init__(self):
        # Face detector instance
        self._faceDet = FaceDetector()
        # Gabor filter bank instance
        self._bank = GaborBank()
        # Emotions detector instance
        self._emotionsDet = EmotionsDetector()
        # Data of the last face detected
        self._face = FaceData()
        # Data of the last emotions detected
        self._emotions = OrderedDict()

    # Detects a face and the prototypic emotions on the given frame image.
    def detect(self, frame):
        # `frame` is the image on which to perform the detection.
        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face
            # Crop just the face region
            frame, face = face.crop(frame)
            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)
            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)
            return True
        else:
            self._face = None
            return False

    # Draws the detected data on the given frame image.
    def draw(self, frame):
        # Font settings: face, scale, thickness and glow (outline) thickness.
        font = cv2.FONT_HERSHEY_SIMPLEX
        scale = 0.5
        thick = 1
        glow = 3 * thick
        # Module-level counters of how many frames each emotion "won".
        # NOTE(review): these globals must be initialized elsewhere in the
        # file before draw() runs, otherwise the `+= 1` below raises NameError.
        global cnt
        global neutral_cnt
        global happiness_cnt
        global sadness_cnt
        global ange_cnt
        global fear_cnt
        global surprise_cnt
        global disgust_cnt
        # Color settings (BGR).
        black = (0, 0, 0)
        white = (255, 255, 255)
        yellow = (0, 255, 255)
        red = (0, 0, 255)
        empty = True
        # Position/width used to plot the face landmarks and emotion table.
        x = 5
        y = 0
        w = int(frame.shape[1] * 0.2)
        try:
            face = self._face
            empty = face.isEmpty()
            face.draw(frame)
        except:
            # Best-effort: self._face may be None when no face was detected.
            pass
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:
                labels = list(emotions.keys())
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]
            # Header text: 'emotions'
            text = 'emotions'
            size, _ = cv2.getTextSize(text, font, scale, thick)
            y += size[1] + 20
            # frame, text, (x, y), font, scale, black, glow (outline pass)
            cv2.putText(frame, text, (x, y), font, scale, black, glow)
            # frame, text, (x, y), font, scale, yellow, thickness (fill pass)
            cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
            y += 5
            cv2.line(frame, (x, y), (x + w, y), black, 1)
            # 'happiness' (widest label) sizes the columns: font, scale, thickness
            size, _ = cv2.getTextSize('happiness', font, scale, thick)
            t = size[0] + 20
            w = 150
            h = size[1]
            for l, v in zip(labels, values):
                lab = '{}:'.format(l)
                val = '{:.2f}'.format(v)
                size, _ = cv2.getTextSize(l, font, scale, thick)
                # The most probable emotion is drawn in red, the rest in yellow.
                color = red if l == bigger else yellow
                if color == red:
                    # Count which emotion won this frame and redraw the
                    # running totals overlay.
                    if l == 'neutral':
                        neutral_cnt += 1
                    if l == 'happiness':
                        happiness_cnt += 1
                    if l == 'sadness':
                        sadness_cnt += 1
                    if l == 'anger':
                        ange_cnt += 1
                    if l == 'fear':
                        fear_cnt += 1
                    if l == 'surprise':
                        surprise_cnt += 1
                    if l == 'disgust':
                        disgust_cnt += 1
                    cnt = [
                        neutral_cnt,
                        happiness_cnt,
                        sadness_cnt,
                        ange_cnt,
                        fear_cnt,
                        surprise_cnt,
                        disgust_cnt
                    ]
                    text1 = ' count '
                    size, _ = cv2.getTextSize(text1, font, scale, thick)
                    a = 300  # x position (rightwards)
                    b = 33   # y position (downwards)
                    cv2.putText(frame, text1, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text1, (a, b), font, scale, yellow, thick)
                    text2 = '{:d}'.format(cnt[0])  # neutral
                    a = 300  # x position
                    b = 66   # y position
                    cv2.putText(frame, text2, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text2, (a, b), font, scale, yellow, thick)
                    text3 = '{:d}'.format(cnt[1])  # happiness
                    a = 300  # x position
                    b = 90   # y position
                    cv2.putText(frame, text3, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text3, (a, b), font, scale, yellow, thick)
                    text4 = '{:d}'.format(cnt[2])  # sadness
                    a = 300  # x position
                    b = 111  # y position
                    cv2.putText(frame, text4, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text4, (a, b), font, scale, yellow, thick)
                    text5 = '{:d}'.format(cnt[3])  # anger
                    a = 300  # x position
                    b = 140  # y position
                    cv2.putText(frame, text5, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text5, (a, b), font, scale, yellow, thick)
                    text6 = '{:d}'.format(cnt[4])  # fear
                    a = 300  # x position
                    b = 170  # y position
                    cv2.putText(frame, text6, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text6, (a, b), font, scale, yellow, thick)
                    text7 = '{:d}'.format(cnt[5])  # surprise
                    a = 300  # x position
                    b = 200  # y position
                    cv2.putText(frame, text7, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text7, (a, b), font, scale, yellow, thick)
                    text8 = '{:d}'.format(cnt[6])  # disgust
                    a = 300  # x position
                    b = 230  # y position
                    cv2.putText(frame, text8, (a, b), font, scale, black, glow)
                    cv2.putText(frame, text8, (a, b), font, scale, yellow, thick)
                # Per-label bar row (runs for every emotion, not only the winner).
                y += size[1] + 15
                p1 = (x + t, y - size[1] - 5)
                p2 = (x + t + w, y - size[1] + h + 5)
                cv2.rectangle(frame, p1, p2, black, 1)
                # Draw the filled rectangle proportional to the probability.
                p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
                cv2.rectangle(frame, p1, p2, color, -1)
                cv2.rectangle(frame, p1, p2, black, 1)
                # Draw the emotion label (outline pass, then colored fill pass).
                cv2.putText(frame, lab, (x, y), font, scale, black, glow)
                cv2.putText(frame, lab, (x, y), font, scale, color, thick)
                # Draw the value of the emotion probability.
                cv2.putText(frame, val, (x + t + 5, y), font, scale, black, glow)
                cv2.putText(frame, val, (x + t + 5, y), font, scale, white, thick)
        except Exception as e:
            print(e)
            pass
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.

    This variant prints one timestamped line per emotion per frame instead of
    drawing on the image.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """
        self._faceDet = FaceDetector()          # face detector instance
        self._bank = GaborBank()                # bank of Gabor filters
        self._emotionsDet = EmotionsDetector()  # emotions detector instance
        self._face = FaceData()                 # data of the last face detected
        self._emotions = OrderedDict()          # data of the last emotions detected

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to perform the detections from.

        Returns
        -------
        ret: bool
            Indication of success or failure.
        """
        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face

            # Crop just the face region
            frame, face = face.crop(frame)

            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)

            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)
            return True
        else:
            self._face = None
            return False

    #---------------------------------------------
    def imprimir_tempo(self, tempo, frame, lab, val, fps):
        """
        Print (and return) the wall-clock timestamp of a video frame together
        with an emotion label and its probability.

        Parameters
        ----------
        tempo: str
            Capture start time formatted as
            'year/month/day/hour/minute/second/nano'.
        frame: int
            Zero-based index of the frame.
        lab: str
            Emotion label.
        val: str
            Formatted probability value.
        fps: float
            Frame rate used to convert the frame index into elapsed time.

        Returns
        -------
        saida: str
            The printed line, 'Y/M/D/h/m/s/ns-lab-val'.
        """
        from datetime import date, timedelta

        ano, mes, dia, hora, minuto, segundo, nano = (
            int(p) for p in tempo.split('/'))

        # Elapsed nanoseconds for this frame, quantized to a whole number of
        # nanoseconds per frame (same quantization the original code used).
        tempoPassado = frame * int(1000000000 / fps)

        # Carry the elapsed time through each unit.  The original
        # implementation carried into hours with % 60 instead of % 24,
        # divided by 24 before taking the day carry, and never handled
        # month/year rollover.
        nano += tempoPassado % 1000000000
        carry = tempoPassado // 1000000000 + nano // 1000000000
        nano %= 1000000000

        segundo += carry
        carry = segundo // 60
        segundo %= 60

        minuto += carry
        carry = minuto // 60
        minuto %= 60

        hora += carry
        carry = hora // 24
        hora %= 24

        # Let the calendar handle day/month/year rollover.
        data = date(ano, mes, dia) + timedelta(days=carry)

        saida = (str(data.year) + '/' + str(data.month) + '/' + str(data.day)
                 + '/' + str(hora) + '/' + str(minuto) + '/' + str(segundo)
                 + '/' + str(nano) + '-' + lab + '-' + val)
        print(saida)
        return saida

    #-----------------------------------------
    def draw(self, frame, tempo, frameNum, fps):
        """
        Prints the detected data of the given frame image, one timestamped
        line per emotion.

        Parameters
        ----------
        frame: numpy.ndarray
            Image the detections refer to (only face.draw() touches it).
        tempo: str
            Start time ('Y/M/D/h/m/s/ns') forwarded to imprimir_tempo().
        frameNum: int
            Index of the current frame.
        fps: float
            Frame rate of the video.
        """
        empty = True
        try:
            face = self._face
            empty = face.isEmpty()
            face.draw(frame)
        except Exception:
            # Best-effort: self._face may be None when no face was detected.
            pass

        # Print the emotion probabilities
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:
                labels = list(emotions.keys())
                values = list(emotions.values())
                # (The original also computed the most probable label here,
                # but never used it; the dead computation was removed.)

            for l, v in zip(labels, values):
                lab = '{}'.format(l)
                val = '{:.2f}'.format(v)
                self.imprimir_tempo(tempo, frameNum, lab, val, fps)
        except Exception as e:
            print(e)
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.

    This variant plots each emotion's probability as a scrolling line graph
    (one row per emotion) and optionally prints per-frame timestamped values.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """
        # The instance of the face detector.
        self._faceDet = FaceDetector()
        # The instance of the bank of Gabor filters.
        self._bank = GaborBank()
        # The instance of the emotions detector.
        self._emotionsDet = EmotionsDetector()
        # Data of the last face detected.
        self._face = FaceData()
        # Data of the last emotions detected.
        self._emotions = OrderedDict()

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to perform the detections from.

        Returns
        -------
        ret: bool
            Indication of success or failure.
        """
        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face
            # Crop just the face region
            frame, face = face.crop(frame)
            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)
            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)
            return True
        else:
            self._face = None
            return False

    #---------------------------------------------
    def imprimir_tempo(self, tempo, frame, lab, val, fps):
        """
        Print the timestamp of a video frame with an emotion label and value.

        Parameters
        ----------
        tempo: str
            Start time formatted as 'year/month/day/hour/minute/second/nano'.
        frame: int
            Index of the frame whose timestamp is printed.
        lab: str
            Emotion label.
        val: str
            Formatted probability value.
        fps: float
            Frame rate used to convert the frame index into elapsed time.
        """
        [ano, mes, dia, hora, minuto, segundo, nano] = tempo.split('/')
        nano = int(nano)
        segundo = int(segundo)
        minuto = int(minuto)
        hora = int(hora)
        dia = int(dia)
        mes = int(mes)
        ano = int(ano)
        # Elapsed nanoseconds since the start, quantized per frame.
        tempoPassado = frame*int(1000000000/fps)
        # Carry the elapsed time through each unit: ns -> s -> min -> h -> day.
        nano += tempoPassado%1000000000
        tempoPassado = int(tempoPassado/1000000000)
        if nano >= 1000000000:
            nano -= 1000000000
            tempoPassado += 1
        segundo += tempoPassado%60
        tempoPassado = int(tempoPassado/60)
        if segundo >= 60:
            segundo -= 60
            tempoPassado += 1
        minuto += tempoPassado%60
        tempoPassado = int(tempoPassado/60)
        if minuto >= 60:
            minuto -= 60
            tempoPassado += 1
        # NOTE(review): the carry into hours uses % 60 — hours should
        # presumably carry with % 24; the day carry below applies % 24 to a
        # value already divided by 24, and month/year rollover is never
        # handled.  Confirm the intended timestamp semantics before relying
        # on multi-day outputs.
        hora += tempoPassado%60
        tempoPassado = int(tempoPassado/24)
        if hora >= 24:
            hora -= 24
            tempoPassado += 1
        dia += tempoPassado%24
        if hora >= 24:
            hora -= 24
        saida = str(ano) + '/' + str(mes) + '/' + str(dia) + '/' + str(hora) + '/' + str(minuto) + '/' + str(segundo) + '/' + str(nano) + '-' + lab + '-' + val
        print(saida)

    #-----------------------------------------
    def drawFrame(self, frame, labels):
        """
        Draw the static grid (axes, separators and row labels) of the emotion
        graph on the given image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw.
        labels: list of str
            One label per graph row.

        Returns
        -------
        frame: numpy.ndarray
            The same image with the grid drawn.
        """
        atual = -1
        preto = (0,0,0)        # black (BGR)
        amarelo = (0,255,255)  # yellow (BGR)
        # NOTE(review): TAM_LINHA, TAM_LET, COM_X/COM_Y, FIM_X/FIM_Y,
        # DIVISOR_X and the OFFSET* names are module-level constants defined
        # elsewhere in the file.
        soft = TAM_LINHA
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Bottom border of the whole graph area.
        cv2.line(frame, (COM_X,FIM_Y), (FIM_X, FIM_Y), preto, soft)
        y = COM_Y
        for l in labels:
            atual += 1
            lab = '{}:'.format(l)
            x = OFFSETLETRA_X
            # Each row is stacked upwards from the bottom of the graph.
            y = OFFSETLETRA_FRAME - atual*TAM_LET
            #size, _ = cv2.getTextSize(lab, font, 1, soft)
            # The widest label measures (164, 22).
            #print (size)
            cv2.putText(frame, lab, (x, y+OFFSETLETRA_Y), font, 1, amarelo, soft)
            cv2.line(frame, (COM_X,y), (FIM_X, y), preto, soft)
            cv2.line(frame, (DIVISOR_X, y), (DIVISOR_X,FIM_Y), preto, soft)
            cv2.line(frame, (FIM_X, y), (FIM_X,FIM_Y), preto, soft)
            #cv2.line(frame, (600, y), (600,465), cor, soft)
        return frame

    #-----------------------------------------
    def draw(self, frame, tempo, frameNum, vals, fps, processar):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw the information to.
        tempo: str
            Start time ('Y/M/D/h/m/s/ns') forwarded to imprimir_tempo().
        frameNum: int
            Index of the current frame.
        vals: sequence of collections.deque
            One rolling buffer of probabilities per emotion, updated in place.
        fps: float
            Frame rate of the video.
        processar: bool
            When True, also print the timestamped values.

        Returns
        -------
        tuple or None
            (frame, vals) on success; None when an exception was caught.
        """
        amarelo = (0, 255, 255)  # yellow (BGR)
        empty = True
        try:
            face = self._face
            empty = face.isEmpty()
        except:
            # Best-effort: self._face may be None when no face was detected.
            pass
        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            atual = 0
            labels = ['Neutral', 'Felicidade', 'Tristeza', 'Raiva', 'Medo', 'Surpresa', 'Desgosto']
            if empty:
                values = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            else:
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]
            frame = self.drawFrame(frame, labels)
            for l, v in zip(labels, values):
                lab = '{}'.format(l)
                val = '{:.2f}'.format(v)
                # Shift the rolling buffer left and append the newest value.
                vals[atual].rotate(-1)
                vals[atual].pop()
                vals[atual].append(v)
                # Draw the polyline of this emotion's recent history.
                for i in range(PONTOS-1):
                    valor1 = int(OFFSETPONTO - vals[atual][i]*RESOL_LINHA_Y - atual*RESOL_LINHA_Y)
                    valor2 = int(OFFSETPONTO - vals[atual][i+1]*RESOL_LINHA_Y - atual*RESOL_LINHA_Y)
                    cv2.line(frame, (OFFSETLINHA+RESOL_LINHA_X*i, valor1), (OFFSETLINHA+RESOL_LINHA_X*(i+1), valor2 ), amarelo, TAM_LINHA)
                #cv2.putText(frame, val, (5, 20 + atual*25), font, 1, yellow, 1)
                #cv2.putText(frame, '{}'.format(vals[atual][199]), (320, 20 + atual*25), font, 1, yellow, 1)
                if processar:
                    self.imprimir_tempo(tempo, frameNum, lab, val, fps)
                atual += 1
            return frame, vals
        except Exception as e:
            print(e)
            pass
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.

    This variant also identifies people with the face_recognition library,
    matching faces against images stored in the './faces' folder.
    """

    #-----------------------------------------
    def __init__(self, imagepath=None):
        """
        Class constructor.

        Parameters
        ----------
        imagepath: str or None
            Optional image path; only echoed to stdout for debugging.
        """
        self._faceDet = FaceDetector()          # face detector instance
        self._bank = GaborBank()                # bank of Gabor filters
        self._emotionsDet = EmotionsDetector()  # emotions detector instance
        self._face = FaceData()                 # data of the last face detected
        self._emotions = OrderedDict()          # data of the last emotions detected
        print(imagepath)

    #-----------------------------------------
    def detect(self, frame, face):
        """
        Detects the prototypic emotions for an already-detected face on the
        given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to perform the detections from.
        face: FaceData
            Face previously detected on the frame.

        Returns
        -------
        ret: bool
            Indication of success (always True on return).
        listofemotions: list
            Single-element list with the detected emotions mapping.
        """
        listofemotions = []

        # Crop just the face region
        frame, face = face.crop(frame)

        # Filter it with the Gabor bank
        responses = self._bank.filter(frame)

        # Detect the prototypic emotions based on the filter responses
        self._emotions = self._emotionsDet.detect(face, responses)
        listofemotions.append(self._emotions)
        return True, listofemotions

    #-----------------------------------------
    def draw(self, frame, face):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw the information to.
        face: FaceData
            Face whose region anchors the drawing.

        Returns
        -------
        bigger: str or None
            Label of the most probable emotion, or None when nothing was
            detected (the original raised NameError in that case).
        """
        # Font settings
        font = cv2.FONT_HERSHEY_SIMPLEX
        scale = 0.5
        thick = 1
        glow = 3 * thick

        # Color settings (BGR)
        black = (0, 0, 0)
        white = (255, 255, 255)
        yellow = (0, 255, 255)
        red = (0, 0, 255)

        empty = True
        # Initialize so the final `return bigger` cannot raise NameError when
        # no face/emotion data is available (bug in the original).
        bigger = None

        # Plot the face landmarks and anchor the text at the face region.
        try:
            empty = face.isEmpty()
            face.draw(frame)
            x = face.region[0]
            y = face.region[1]
            w = int(frame.shape[1] * 0.2)
        except Exception:
            # Best-effort: face may be None/invalid.  x/y/w stay unbound in
            # that case and the drawing below fails into its own handler,
            # matching the original behavior.
            pass

        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:
                labels = list(emotions.keys())
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]

            # Draw the header
            text = 'emotions'
            size, _ = cv2.getTextSize(text, font, scale, thick)
            y += size[1] + 20
            cv2.putText(frame, text, (x, y), font, scale, black, glow)
            cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
            y += 5
            cv2.line(frame, (x, y), (x + w, y), black, 1)

            # 'happiness' (widest label) sizes the label column.
            size, _ = cv2.getTextSize('happiness', font, scale, thick)
            t = size[0] + 20
            w = 150
            h = size[1]
            for l, v in zip(labels, values):
                lab = '{}:'.format(l)
                val = '{:.2f}'.format(v)
                size, _ = cv2.getTextSize(l, font, scale, thick)

                # Set a red color for the emotion with bigger probability
                color = red if l == bigger else yellow

                y += size[1] + 15

                p1 = (x + t, y - size[1] - 5)
                p2 = (x + t + w, y - size[1] + h + 5)
                cv2.rectangle(frame, p1, p2, black, 1)

                # Draw the emotion label
                cv2.putText(frame, lab, (x, y), font, scale, black, glow)
                cv2.putText(frame, lab, (x, y), font, scale, color, thick)

                # Draw the value of the emotion probability
                cv2.putText(frame, val, (x + t + 5, y), font, scale, black, glow)
                cv2.putText(frame, val, (x + t + 5, y), font, scale, white, thick)
        except Exception as e:
            print(e)

        return bigger

    #-----------------------------------------
    def unknown_image_encoded(self, img):
        """
        Encode a face given its file name (relative to the 'faces' folder).
        """
        face = fr.load_image_file("faces/" + img)
        encoding = fr.face_encodings(face)[0]
        return encoding

    #-----------------------------------------
    def classify_face(self, im, face):
        """
        Find all of the faces in a given image and label them if it knows
        what they are.

        Parameters
        ----------
        im: numpy.ndarray
            Image where to look for faces.
        face: object
            Unused; kept for interface compatibility with callers.

        Returns
        -------
        listofnames: list of str
            One recognized name per face found ('Unknown' when no match).
        """
        encoded = {}
        listofnames = []

        # NOTE(review): every call re-walks ./faces and re-encodes all known
        # faces; consider caching the encodings if this runs per frame.
        # (Renamed the loop variable so it no longer shadows the `face`
        # parameter, a latent bug in the original.)
        for dirpath, dnames, fnames in os.walk("./faces"):
            for f in fnames:
                if f.endswith(".jpg") or f.endswith(".png"):
                    known_img = fr.load_image_file("faces/" + f)
                    encoding = fr.face_encodings(known_img)[0]
                    encoded[f.split(".")[0]] = encoding

        faces = encoded
        faces_encoded = list(faces.values())
        known_face_names = list(faces.keys())

        face_locations = face_recognition.face_locations(im)
        unknown_face_encodings = face_recognition.face_encodings(
            im, face_locations)

        face_names = []
        for face_encoding in unknown_face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(faces_encoded, face_encoding)
            name = "Unknown"

            # Use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(
                faces_encoded, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            face_names.append(name)
            listofnames.append(name)

        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Draw a box around the face
            cv2.rectangle(im, (left - 20, top - 20), (right + 20, bottom + 20),
                          (255, 0, 0), 2)

            # Draw a label with a name below the face
            cv2.rectangle(im, (left - 20, bottom - 15), (right + 20, bottom + 20),
                          (255, 0, 0), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(im, name, (left - 20, bottom + 15), font, 1.0,
                        (255, 255, 255), 2)

        return (listofnames)