def check_smile(frame, bounding_box):
    boxes = []
    _status = False
    try:
        # Convert the image to grayscale (pixel normalization)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        smiles = smileCascade.detectMultiScale(gray,
                                               scaleFactor=1.7,
                                               minNeighbors=22,
                                               minSize=(25, 25))
        if len(smiles) > 0:
            _status = True
            if dlc.CONFIG["DEBUG"]:
                print("Smile found: ", len(smiles))
            # Translate smile boxes into frame coordinates using the face box origin
            for (x, y, w, h) in smiles:
                boxes.append((x + bounding_box[0], y + bounding_box[1], w, h))
    except Exception as e:
        dlc.console("Error: " + str(e))
    return _status, boxes
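# Usage sketch for check_smile (hedged: assumes the module has already loaded
# `smileCascade`, e.g. from OpenCV's bundled haarcascade_smile.xml, and that a
# webcam is available at index 0; the full frame stands in for a detected
# face's bounding box here).
def _example_check_smile():
    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    cap.release()
    if ok:
        h, w = frame.shape[:2]
        status, boxes = check_smile(frame, (0, 0, w, h))
        print("Smile detected:", status, "boxes:", boxes)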
def find_faces(p_frame,
               scale_factor=1.1,
               min_neighbors=5,
               min_size=20,
               face_crop_size=dlc.CONFIG["CROP_SIZE"],
               padding=dlc.CONFIG["PADDING_DETECTION"],
               debug=True):
    # Pre-processing: work on a copy and convert to grayscale
    frame = np.copy(p_frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = []
    try:
        # HOG+SVM detector (dlib)
        rects = detector(gray, 1)
        if debug:
            print("Boxes: " + str(list(rects)))
        for bb in rects:
            face = Face()
            face.container_image = frame
            face.bounding_box = np.zeros(4, dtype=np.int32)
            # Face box as (x, y, w, h), expanded by the configured padding
            x = bb.left() - padding[0]
            y = bb.top() - padding[1]
            w = bb.right() + padding[0]
            h = bb.bottom() + padding[1]
            face.bounding_box[0] = int(x)
            face.bounding_box[1] = int(y)
            face.bounding_box[2] = int(w - bb.left())
            face.bounding_box[3] = int(h - bb.top())
            if debug:
                print("BB (dlib): " + str(bb))
                print("BB (face): " + str(face.bounding_box))
            cropped = frame[face.bounding_box[1]:(h - padding[1]),
                            face.bounding_box[0]:(w - padding[0]), :]
            # NOTE: scipy.misc.imresize was removed in SciPy 1.3; on newer
            # stacks cv2.resize is the drop-in replacement.
            face.image = misc.imresize(cropped, (face_crop_size, face_crop_size),
                                       interp='bilinear')
            faces.append(face)
    except Exception as e:
        dlc.console("Error: " + str(e))
    return faces
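# Sketch (hedged): the module-level `detector` assumed by find_faces above is
# typically dlib's stock HOG frontal-face detector.
def _example_make_hog_detector():
    import dlib  # already a dependency of this module
    return dlib.get_frontal_face_detector()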
def draw_boxes(p_frame,
               faces,
               boxColor=dlc.CONFIG["BOX_COLOR"],
               boxLine=dlc.CONFIG["BOX_LINE"],
               padding=dlc.CONFIG["PADDING_DETECTION"],
               frame_rate=0,
               debug=True):
    frame = np.copy(p_frame)
    try:
        if debug:
            dlc.console("Detected faces: " + str(len(faces)))
        if faces is not None:
            for face in faces:
                face_bb = face.bounding_box.astype(int)
                (x, y, w, h) = (face_bb[0], face_bb[1], face_bb[2], face_bb[3])
                # Box corners: top-left and bottom-right, expanded by padding
                cv2.rectangle(frame,
                              (x - padding[0], y - padding[1]),
                              (x + w + padding[0], y + h + padding[1]),
                              boxColor, boxLine)
    except Exception as e:
        dlc.console("Error: " + str(e))
    return frame
def detect(face):
    x = []
    result = None
    try:
        # Grayscale + resize to the model input, then stack the single channel
        # three times because the network expects a 3-channel image
        face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)
        gray = imresize(face, [height, width], 'bilinear')
        gray = np.dstack((gray,) * 3)
        x.append(gray)
        x = np.asarray(x)
        result = model.predict(x, batch_size=8, verbose=0)
    except Exception as e:
        dlc.console("Emotion detection error: " + str(e))
    return result
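# Usage sketch for detect() (hedged: `model`, `height`, `width` and `EMOTIONS`
# are assumed to be loaded at module level alongside the emotion model;
# `face_crop` is an RGB face crop such as Face.image).
def _example_detect_emotion(face_crop):
    result = detect(face_crop)
    if result is not None:
        # result[0] holds one probability per entry of EMOTIONS
        best = int(np.argmax(result[0]))
        print("Emotion:", EMOTIONS[best], "prob.:", float(result[0][best]))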
def plot(p_frame, result, color_text=(0, 0, 255), color_chart=(255, 0, 0), margin_top=60):
    frame = np.copy(p_frame)
    try:
        # One label plus one horizontal bar per emotion; the bar length is the
        # predicted probability scaled to 100 pixels
        for index, emotion in enumerate(EMOTIONS):
            cv2.putText(frame, emotion, (10, index * 20 + 20 + margin_top),
                        cv2.FONT_HERSHEY_PLAIN, 1, color_text, 1)
            cv2.rectangle(frame,
                          (130, index * 20 + 10 + margin_top),
                          (130 + int(result[0][index] * 100), (index + 1) * 20 + 4 + margin_top),
                          color_chart, -1)
    except Exception as e:
        dlc.console("Emotion plot error: " + str(e))
    return frame
def find_landmarks(p_frame, faces):
    frame = np.copy(p_frame)
    landmarks = []
    rect = None
    try:
        if len(faces) > 0:
            # Only the foreground face (faces[0]) is used; see
            # https://github.com/davisking/dlib/issues/545
            rect = bb_to_rect(faces[0].bounding_box)
            landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
    except Exception as e:
        dlc.console("Landmark error: " + str(e))
    return landmarks, rect
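# A minimal sketch of the bb_to_rect helper referenced above (hedged: the
# `_example_` name is illustrative, and it assumes the bounding box is stored
# as opposite corners (x1, y1, x2, y2), matching the Haar-based find_faces in
# this project).
def _example_bb_to_rect(bounding_box):
    import dlib  # already a dependency of this module (used by `predictor`)
    (x1, y1, x2, y2) = [int(i) for i in bounding_box]
    return dlib.rectangle(x1, y1, x2, y2)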
def draw_landmarks(p_frame, landmarks, showLandmarksIdx=False):
    frame = np.copy(p_frame)
    try:
        if landmarks is not None:
            # Draw each landmark on the frame
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                cv2.circle(frame, pos, 3, color=(0, 255, 255))
                if showLandmarksIdx:
                    cv2.putText(frame, str(idx), pos,
                                fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                                fontScale=0.4, color=(0, 0, 255))
    except Exception as e:
        dlc.console("Draw error: " + str(e))
    return frame
def save_register(face, frame=None):
    if register.id and register.name:
        if not np.all(face == 0):
            _img = face
            # File name (image)
            _nome = str(register.name).lower().replace(" ", "_")
            _id = str(register.id).lower().replace(" ", "_")
            _filename = _nome + "-" + _id
            # Storage folders (capture / facenet original)
            _folder_captura = os.path.join(dlc.CONFIG["DATASET_CAPTURE_PATH"], _filename)
            if not os.path.isdir(_folder_captura):
                os.mkdir(_folder_captura)
            _folder_facenet = os.path.join(dlc.CONFIG["DATASET_TRAIN_PATH"], _filename)
            if not os.path.isdir(_folder_facenet):
                os.mkdir(_folder_facenet)
            # Sequential file name based on how many images already exist
            _total = 1
            _total += len(glob.glob(_folder_facenet + "/*.jpg"))
            _total += len(glob.glob(_folder_facenet + "/*.png"))
            arquivo = _filename + "_" + str(_total) + ".jpg"
            arquivo_captura = os.path.join(_folder_captura, arquivo)
            arquivo_facenet = os.path.join(_folder_facenet, arquivo)
            try:
                # Original image (full scene frame); `if frame:` would raise on
                # a numpy array, so test against None explicitly
                if frame is not None:
                    cv2.imwrite(arquivo_captura, frame)
                # FaceNet standard (face crop only)
                cv2.imwrite(arquivo_facenet, _img)
            except Exception as e:
                dlc.console("Error saving image: " + str(e))
        else:
            if dlc.CONFIG["DEBUG"]:
                dlc.console("Error -> No face detected in the image, try again!")
            QMessageBox.warning(None, "Error", "No face detected in the image, try again!")
    else:
        if dlc.CONFIG["DEBUG"]:
            dlc.console("Error -> 'Name' and 'Identifier' fields are required!")
        QMessageBox.warning(None, "Error", "'Name' and 'Identifier' fields are required!")
def draw_boxes(p_frame,
               faces,
               boxColor=dlc.CONFIG["BOX_COLOR"],
               boxLine=dlc.CONFIG["BOX_LINE"],
               padding=dlc.CONFIG["PADDING_DETECTION"],
               frame_rate=0,
               debug=True):
    frame = np.copy(p_frame)
    try:
        if debug:
            dlc.console("Detected faces: " + str(len(faces)))
        if faces is not None:
            for face in faces:
                face_bb = face.bounding_box.astype(int)
                # Here the bounding box already stores opposite corners
                # (x1, y1, x2, y2), so no padding is applied when drawing
                (x, y, w, h) = (face_bb[0], face_bb[1], face_bb[2], face_bb[3])
                cv2.rectangle(frame, (x, y), (w, h), boxColor, boxLine)
    except Exception as e:
        dlc.console("Error: " + str(e))
    return frame


# Alternative MTCNN-based detector, kept commented out for reference:
#
# class Mtcnn:
#     # Face detection parameters
#     minsize = 20                 # minimum face size
#     threshold = [0.6, 0.7, 0.7]  # thresholds for the three stages
#     factor = 0.709               # scale factor
#
#     def __init__(self, face_crop_size=160, face_crop_margin=32):
#         self.pnet, self.rnet, self.onet = self._setup_mtcnn()
#         self.face_crop_size = face_crop_size
#         self.face_crop_margin = face_crop_margin
#
#     def _setup_mtcnn(self):
#         with tf.Graph().as_default():
#             gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
#             sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
#             with sess.as_default():
#                 return align.detect_face.create_mtcnn(sess, None)
#
#     def find_faces_x(self, frame):
#         faces = []
#         bounding_boxes, _ = align.detect_face.detect_face(frame, self.minsize,
#                                                           self.pnet, self.rnet, self.onet,
#                                                           self.threshold, self.factor)
#         for bb in bounding_boxes:
#             face = Face()
#             face.container_image = frame
#             face.bounding_box = np.zeros(4, dtype=np.int32)
#             img_size = np.asarray(frame.shape)[0:2]
#             face.bounding_box[0] = np.maximum(bb[0] - self.face_crop_margin / 2, 0)
#             face.bounding_box[1] = np.maximum(bb[1] - self.face_crop_margin / 2, 0)
#             face.bounding_box[2] = np.minimum(bb[2] + self.face_crop_margin / 2, img_size[1])
#             face.bounding_box[3] = np.minimum(bb[3] + self.face_crop_margin / 2, img_size[0])
#             cropped = frame[face.bounding_box[1]:face.bounding_box[3],
#                             face.bounding_box[0]:face.bounding_box[2], :]
#             face.image = misc.imresize(cropped, (self.face_crop_size, self.face_crop_size), interp='bilinear')
#             faces.append(face)
#         return faces
def find_faces(frame,
               scale_factor=1.3,
               min_neighbors=5,
               min_size=dlc.CONFIG["MIN_SIZE_FACE"],
               face_crop_size=dlc.CONFIG["CROP_SIZE"],
               padding=dlc.CONFIG["PADDING_DETECTION"],
               debug=dlc.CONFIG["DEBUG"]):
    faces = []
    try:
        # Convert the image to grayscale (pixel normalization)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Look for faces in the frame (initially just one).
        #
        # detectMultiScale* detects objects; with the face cascade, it detects faces.
        # Arguments:
        #   frame: image to evaluate, converted to grayscale to shrink the search space.
        #   scaleFactor: compensates for object size versus distance (objects far
        #       from the camera appear smaller, and vice versa).
        #   minNeighbors: how many nearby positive detections are required to declare
        #       an object found, since OpenCV scans with a sliding window.
        #   minSize: minimum size of each sliding window (candidate object).
        #
        # * https://docs.opencv.org/2.4/modules/objdetect/doc/cascade_classification.html#cascadeclassifier-detectmultiscale
        bounding_boxes = detector.detectMultiScale(gray,
                                                   scaleFactor=scale_factor,
                                                   minNeighbors=min_neighbors,
                                                   minSize=(min_size, min_size))
        for bb in bounding_boxes:
            face = Face()
            face.container_image = frame
            face.bounding_box = np.zeros(4, dtype=np.int32)
            # Bounding box stored as opposite corners (x1, y1, x2, y2), expanded by padding
            face.bounding_box[0] = int(bb[0] - padding[0])
            face.bounding_box[1] = int(bb[1] - padding[1])
            face.bounding_box[2] = int(bb[0] + bb[2] + padding[0])
            face.bounding_box[3] = int(bb[1] + bb[3] + padding[1])
            cropped = frame[face.bounding_box[1]:face.bounding_box[3],
                            face.bounding_box[0]:face.bounding_box[2], :]
            face.image = misc.imresize(cropped, (face_crop_size, face_crop_size),
                                       interp='bilinear')
            faces.append(face)
    except Exception as e:
        dlc.console("Error: " + str(e))
    return faces
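# Sketch (hedged): the module-level Haar `detector` used above is usually the
# frontal-face cascade bundled with OpenCV.
def _example_make_haar_detector():
    return cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")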
        classifier_path = os.path.join(dlc.CONFIG["MODEL_PATH"], name_classifier)
        with open(classifier_path, 'wb') as classifier:
            pickle.dump((model, classes), classifier)

        # Export the embeddings of each class, iterating in label order so the
        # slices line up with the layout of embeddings_set
        idx_previous = 0
        for label in sorted(set(labels)):
            # Class name (registration record)
            name = str(dataset[label]).split(',')[0]
            # Number of samples in this class
            total = labels.count(int(label))
            # Slice this class's embeddings out of the full set
            embeddings_class = embeddings_set[idx_previous:idx_previous + total]
            # Save the result next to the class's training images
            np.save(os.path.join(dlc.CONFIG["DATASET_TRAIN_PATH"], name, 'embeddings.npy'),
                    embeddings_class)
            # Advance past this class (the original reset to `total`, which
            # breaks as soon as there are three or more classes)
            idx_previous += total

        if debug:
            print('SVM classifier saved to "%s"!' % classifier_path)
    except Exception as e:
        dlc.console("Error -> " + str(e))
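# Sketch (hedged): loading the pickled classifier back for inference; the
# (model, classes) tuple layout matches the pickle.dump call above.
def _example_load_classifier(name_classifier):
    classifier_path = os.path.join(dlc.CONFIG["MODEL_PATH"], name_classifier)
    with open(classifier_path, 'rb') as f:
        model, classes = pickle.load(f)
    return model, classes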
def camera_analisys(camera_source=0, ret=True):
    # While frames are available
    while ret:
        # Grab a frame
        ret, frame = cap.read()
        if ret:
            if not str(camera_source).isnumeric():
                # IP camera: resize to the configured resolution
                frame = cv2.resize(frame, (dlc.CONFIG["FRAME_WIDTH"], dlc.CONFIG["FRAME_HEIGHT"]))
            # Record the registration session
            if dlc.CONFIG["RECORD_REGISTER"]:
                # Frame size must match the recorder (QImage -> VGA or FULL HD only)
                video_recorder.write(frame)
            if dlc.CONFIG["DEBUG"]:
                start_recognition = time.time()
            # Run face detection on the frame
            faces = face_detector.find_faces(frame)
            if dlc.CONFIG["DEBUG"]:
                dlc.console('Source: {_camera} / Detection time: {_elapsed}s'.format(
                    _camera=camera_source,
                    _elapsed=round(time.time() - start_recognition, 3)))
            # Process faces and render on-screen information
            if len(faces) > 0:
                # Automatic capture
                if dlc.CONFIG["AUTO_CAPTURE"]:
                    for face in faces:
                        save_register(face.image)
                frame = face_detector.draw_boxes(frame, faces)
                # Preview image: foreground face only
                face = faces[0].image
                text_detection = 'Face detected'
            else:
                face = np.zeros((dlc.CONFIG["CROP_SIZE"], dlc.CONFIG["CROP_SIZE"]))
                text_detection = 'No face detected'
            # Detection details (top banner)
            frame[0:25, 0:dlc.CONFIG["FRAME_WIDTH"], :] = 255
            _id = register.id
            if _id is None:
                _id = '(None)'
            _name = register.name
            if _name is None:
                _name = '(None)'
            cv2.putText(frame,
                        'DATA > Id: {}, Name: {}, Photos: {} | STATUS > {}'.format(
                            _id, _name, count_register_photos(_id, _name), text_detection),
                        (5, 15), cv2.FONT_HERSHEY_SIMPLEX, .45, (0, 0, 0), 1)
            # Commands (bottom banner)
            frame[dlc.CONFIG["FRAME_HEIGHT"] - 25:dlc.CONFIG["FRAME_HEIGHT"],
                  0:dlc.CONFIG["FRAME_WIDTH"], :] = 0
            cv2.putText(frame,
                        'COMMANDS > [N] = New Record | [ENTER] = Save Current Face | [ESC] = Quit',
                        (5, dlc.CONFIG["FRAME_HEIGHT"] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1)
            # Show frames
            cv2.imshow(window_name, frame)
            cv2.imshow("Detected Face", face)
            # Handle key commands
            k = cv2.waitKey(33)
            if k == 27:
                # ESC - stop
                break
            elif k == 110:
                # N - new record
                Dialog().exec_()
            elif k in [13, 32] and not dlc.CONFIG["AUTO_CAPTURE"]:
                # ENTER / SPACE - save the current face image
                save_register(face)
    cap.release()
    cv2.destroyAllWindows()
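# Sketch (hedged) of the count_register_photos helper used above: counts the
# images already saved for the current record, mirroring the folder layout
# that save_register builds (name-id under DATASET_CAPTURE_PATH).
def _example_count_register_photos(_id, _name):
    _filename = str(_name).lower().replace(" ", "_") + "-" + str(_id).lower().replace(" ", "_")
    folder = os.path.join(dlc.CONFIG["DATASET_CAPTURE_PATH"], _filename)
    return len(glob.glob(folder + "/*.jpg")) + len(glob.glob(folder + "/*.png"))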
cap = cv2.VideoCapture(camera_source)

# Adjust RESOLUTION (USB only, when supported) and FPS (IP).
# Frame interval between face-detection runs, keyed to the camera type
if str(camera_source).isnumeric():
    # USB camera
    frame_interval = 3
    # Set resolution: W x H (USB only)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, dlc.CONFIG["FRAME_WIDTH"])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, dlc.CONFIG["FRAME_HEIGHT"])
else:
    frame_interval = cap.get(cv2.CAP_PROP_FPS)

if dlc.CONFIG["DEBUG"]:
    dlc.console('Config: 1 detection every ' + str(frame_interval) + ' frame(s)')

# Record the registration session
if dlc.CONFIG["RECORD_REGISTER"]:
    video_recorder = cv2.VideoWriter(
        os.path.join(dlc.CONFIG["DATASET_RECORD_REGISTER"],
                     'register-{}.avi'.format(
                         len(glob.glob(dlc.CONFIG["DATASET_RECORD_REGISTER"] + "/*.avi")))),
        cv2.VideoWriter_fourcc(*'XVID'),
        frame_interval,
        (dlc.CONFIG["FRAME_WIDTH"], dlc.CONFIG["FRAME_HEIGHT"]))

# Window title
window_name = str(dlc.CONFIG["SYSTEM_NAME"]) + " ({})".format(camera_name)
def camera_proccessing(camera_name, camera_source):
    # Initialize the facial-recognition pipeline
    recognition = pipeline.Recognition()

    # Open the camera
    if str(camera_source).isnumeric():
        # USB camera
        camera_source = int(camera_source)
    cap = cv2.VideoCapture(camera_source)

    # Frame interval between recognition runs, keyed to the camera type
    if str(camera_source).isnumeric():
        # USB camera
        frame_interval = 3
        # Set resolution: W x H (USB only)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, dlc.CONFIG["FRAME_WIDTH"])
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, dlc.CONFIG["FRAME_HEIGHT"])
    else:
        frame_interval = cap.get(cv2.CAP_PROP_FPS)

    if dlc.CONFIG["DEBUG"]:
        dlc.console('Config: 1 recognition every ' + str(frame_interval) + ' frame(s)')

    # Window title
    window_name = str(dlc.CONFIG["SYSTEM_NAME"]) + " ({})".format(camera_name)
    if dlc.CONFIG["SHOW_FRAME"]:
        cv2.namedWindow(window_name)

    # Initial capture state
    if cap.isOpened():
        ret, frame = cap.read()
    else:
        ret = False

    # Interval in seconds between FPS display updates
    fps_display_interval = 5
    # Recognition / FPS counters
    frame_rate = 0
    frame_count = 0
    start_time = time.time()

    # While frames are available
    while ret:
        # Grab a frame
        ret, frame = cap.read()
        if ret:
            if not str(camera_source).isnumeric():
                # IP camera: resize to the configured resolution
                frame = cv2.resize(frame, (dlc.CONFIG["FRAME_WIDTH"], dlc.CONFIG["FRAME_HEIGHT"]))
            # Interval reached: run recognition
            if (frame_count % frame_interval) == 0:
                if dlc.CONFIG["DEBUG"]:
                    start_recognition = time.time()
                # Run face recognition on the frame
                faces = recognition.identify(frame)
                if dlc.CONFIG["DEBUG"]:
                    elapsed = round(time.time() - start_recognition, 3)
                    dlc.console('Source: {_camera}\nProcessing time: {_elapsed}s\nFaces: {_faces}\n'.format(
                        _camera=camera_name,
                        _elapsed=elapsed,
                        _faces=[{"name": face.name,
                                 "prob.": round(face.probability, 3) * 100,
                                 "L2": round(face.l2, 3)} for face in faces]))
            # Refresh the FPS figure once the display interval has elapsed
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0
            # Update the frame counter
            frame_count += 1
            # Add overlays and show the result
            if dlc.CONFIG["SHOW_OVERLAYS"]:
                cv_utils.add_overlays(frame, faces, frame_rate)
            if dlc.CONFIG["SHOW_FRAME"]:
                cv2.imshow(window_name, frame)
            # Check for user interruption
            if cv2.waitKey(20) == 27:  # exit on ESC
                break

    # Release the camera and close the window
    cap.release()
    if dlc.CONFIG["SHOW_FRAME"]:
        cv2.destroyWindow(window_name)
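# Usage sketch (hedged): camera_proccessing accepts either a USB device index
# or an IP-camera URL; both values below are illustrative.
if __name__ == "__main__":
    camera_proccessing("USB Camera", 0)
    # camera_proccessing("IP Camera", "rtsp://<host>/<stream>")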