def get_age_female(draw):
    # Predict age and gender for a single BGR face crop
    img = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
    if age_gender_one_model:
        age, gender, _ = imgProcessing.age_gender_fun(img)
    else:
        age = imgProcessing.age_fun(img)
        gender = imgProcessing.gender_fun(img)
    is_female = 0 if FacialImageProcessing.is_male(gender) else 1
    #print(age, gender, is_female)
    return age, is_female
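# Minimal usage sketch (assumptions: 'face.jpg' is a hypothetical BGR photo on
# disk; imgProcessing and age_gender_one_model are defined as above). Guarded
# with `if False` so the example never runs as part of the script.
if False:
    frame = cv2.imread('face.jpg')
    age, is_female = get_age_female(frame)
    print('predicted age:', age, 'is female:', bool(is_female))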
        plt.imshow(cv2.cvtColor(all_facial_images[filtered_clusters[i][j]], cv2.COLOR_BGR2RGB))
        plt.axis('off')
        plt_ind += 1
    plt.show()


if __name__ == '__main__':
    config = ConfigParser()
    config.read('config.txt')
    default_config = config['DEFAULT']
    minDaysDifferenceBetweenPhotoMDates = int(default_config['MinDaysDifferenceBetweenPhotoMDates'])
    minNoPhotos = int(default_config['MinNoPhotos'])
    minNoFrames = int(default_config['MinNoFrames'])
    distanceThreshold = float(default_config['DistanceThreshold'])
    minFaceWidthPercent = float(default_config['MinFaceWidthPercent']) / 100
    print('minDaysDifferenceBetweenPhotoMDates:', minDaysDifferenceBetweenPhotoMDates,
          ' minNoPhotos:', minNoPhotos, ' minNoFrames:', minNoFrames,
          ' distanceThreshold:', distanceThreshold, ' minFaceWidthPercent:', minFaceWidthPercent)
    imgProcessing = FacialImageProcessing(print_stat=False, minsize=112)
    process_album(imgProcessing, default_config['InputDirectory'])
    #video_filepath = 'D:/datasets/my_photos/iphone/IMG_2220.MOV'  # 'D:/datasets/my_photos/iphone/video.AVI'
    #process_video(imgProcessing, video_filepath, time.gmtime(os.path.getmtime(video_filepath)))
    imgProcessing.close()
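# Example config.txt consumed by the block above. The keys are the ones read
# via default_config[...]; the values shown are illustrative assumptions, not
# the project's defaults.
#
# [DEFAULT]
# MinDaysDifferenceBetweenPhotoMDates = 30
# MinNoPhotos = 3
# MinNoFrames = 10
# DistanceThreshold = 0.9
# MinFaceWidthPercent = 5
# InputDirectory = D:/datasets/my_photos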
from tensorflow.keras.models import load_model

## Landmark predictor setup
landmark_model = "../models/shape_predictor_68_face_landmarks.dat"
landmark_detector = dlib.shape_predictor(landmark_model)

## Class definition (emotion labels)
idx_to_class = {0: 'Anger', 1: 'Disgust', 2: 'Fear', 3: 'Happiness',
                4: 'Neutral', 5: 'Sadness', 6: 'Surprise'}

## Model loading
base_model = load_model('../models/affectnet_emotions/mobilenet_7.h5')

# Image preprocessing
imgProcessing = FacialImageProcessing(False)
INPUT_SIZE = (224, 224)


# Landmark detection using dlib
def lanmark(image, face):
    # Find the 68 landmark points on the face
    landmarks = landmark_detector(image, face)
    # Create a list to contain the landmarks
    landmark_list = []
    # Append (x, y) to landmark_list and draw each point on the image
    for p in landmarks.parts():
        landmark_list.append([p.x, p.y])
        cv2.circle(image, (p.x, p.y), 2, (255, 255, 255), -1)
    return landmark_list
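# Minimal usage sketch (assumption: 'face.jpg' is a hypothetical test image;
# dlib's bundled frontal face detector supplies the face rectangle that
# lanmark expects). Guarded so it never runs as part of the script.
if False:
    img = cv2.imread('face.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_detector = dlib.get_frontal_face_detector()
    for face in face_detector(gray):
        points = lanmark(img, face)
        print('found', len(points), 'landmarks')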
import os
import cv2
from sklearn import preprocessing, metrics
from sklearn.metrics.pairwise import pairwise_distances
import scipy.cluster.hierarchy as hac
from scipy.spatial.distance import squareform
import csv
import numpy as np

from facial_analysis import FacialImageProcessing
from facerec_test import is_image

imgProcessing = FacialImageProcessing()

subject_ids = []
face_features = []
ages = []
genders = []
all_facial_images = []


def rect_intersection_square(a, b):
    # Area of the intersection of two rectangles given as (x1, y1, x2, y2)
    x = max(a[0], b[0])
    y = max(a[1], b[1])
    w = min(a[2], b[2]) - x
    h = min(a[3], b[3]) - y
    if w < 0 or h < 0:
        w = h = 0
    return w * h
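# Quick sanity check for rect_intersection_square (illustrative values): the
# two 10x10 boxes below overlap in a 5x5 region, so the result is 25.
if False:
    print(rect_intersection_square((0, 0, 10, 10), (5, 5, 15, 15)))  # 25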
adience_range = False
adience_age_list = [(0, 2), (4, 6), (8, 12), (15, 20), (25, 32), (38, 43), (48, 53), (60, 100)]


def get_age_range(real_age):
    # Map a real-valued age to the nearest Adience age bucket: a bucket wins
    # while the age lies below the midpoint of the gap to the next bucket
    for ind in range(len(adience_age_list) - 1):
        if real_age <= (adience_age_list[ind][1] + adience_age_list[ind + 1][0]) / 2:
            return ind
    return len(adience_age_list) - 1


# Manual toggle between age/gender prediction backends
if True:
    from facial_analysis import FacialImageProcessing, is_image, age_gender_one_model
    imgProcessing = FacialImageProcessing(False)

    def get_age_female(draw):
        img = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
        if age_gender_one_model:
            age, gender, _ = imgProcessing.age_gender_fun(img)
        else:
            age = imgProcessing.age_fun(img)
            gender = imgProcessing.gender_fun(img)
        is_female = 0 if FacialImageProcessing.is_male(gender) else 1
        #print(age, gender, is_female)
        return age, is_female
elif False:
    import tensorflow as tf
    adience_range = True
    RESIZE_FINAL = 227
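# Illustrative mapping under the midpoint rule above: age 30 falls below the
# midpoint between buckets (25, 32) and (38, 43), i.e. 35, so it maps to
# index 4, the (25, 32) Adience range.
if False:
    print(get_age_range(30), adience_age_list[get_age_range(30)])  # 4 (25, 32)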
import pathlib

import tensorflow as tf
from tensorflow.keras.models import Model, load_model

from facial_analysis import FacialImageProcessing

print(tf.__version__)

# Build a multi-output feature extractor on top of the emotion classifier
base_model = load_model('../models/affectnet_emotions/mobilenet_7.h5')
feature_extractor_model = Model(base_model.input,
                                [base_model.get_layer('global_pooling').output,
                                 base_model.get_layer('feats').output,
                                 base_model.output])
feature_extractor_model.summary()
_, w, h, _ = feature_extractor_model.input.shape

DATA_DIR = 'C:/Users/ccaa9/PycharmProjects/dataset/AFEW/'
emotion_to_index = {'Angry': 0, 'Disgust': 1, 'Fear': 2, 'Happy': 3, 'Neutral': 4, 'Sad': 5, 'Surprise': 6}

imgProcessing = FacialImageProcessing(False)
INPUT_SIZE = (224, 224)


### extract frames
def get_iou(bb1, bb2):
    # Determine the coordinates of the intersection rectangle
    x_left = max(bb1[0], bb2[0])
    y_top = max(bb1[1], bb2[1])
    x_right = min(bb1[2], bb2[2])
    y_bottom = min(bb1[3], bb2[3])
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    # Combine the box areas via inclusion-exclusion to get the union area
    bb1_area = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])
    bb2_area = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])
    return intersection_area / float(bb1_area + bb2_area - intersection_area)
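# Illustrative check of get_iou: the boxes overlap in a 5x5 = 25 region and
# their union covers 100 + 100 - 25 = 175, so the IoU is 25/175, about 0.143.
if False:
    print(get_iou([0, 0, 10, 10], [5, 5, 15, 15]))  # ~0.1429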