Code example #1
def main():
    # fetch the datasets
    datasets = get_datasets(DATASETS_CSV,
                            test_size=TEST_SIZE,
                            image_size=IMAGE_SIZE)
    train_data, _, _, _ = datasets

    # TODO
    # step 29: accuracy 0.9961215257644653
    # at step 30 accuracy dropped by about half due to overfitting

    # run training split across max_steps iterations
    max_steps = 25
    run_training(
        datasets,
        tensorboard_path=TENSORBOARD_PATH,
        checkpoint_path=MODEL_PATH,
        # number of classes
        num_classes=len(get_labels(DATASETS_CSV)),
        # image size
        image_size=IMAGE_SIZE,
        # number of channels per pixel: 3 = color, 1 = grayscale
        channel=CHANNELS,
        # number of training iterations
        max_steps=max_steps,
        # amount of data to train on per epoch
        batch_size=int(len(train_data) / max_steps),
        # learning rate
        learning_rate=1e-4)
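As a quick illustration of the batch_size arithmetic above (the sample count is made up, not taken from DATASETS_CSV): splitting the training data evenly over max_steps gives the per-step batch size.

# hypothetical sanity check for the batch_size expression above
train_data = list(range(1000))          # assume 1,000 training samples
max_steps = 25
batch_size = int(len(train_data) / max_steps)
print(batch_size)                       # 40 samples per training step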
Code example #2
def mkdir(path):
    folder = os.path.exists(path)
    if not folder:
        os.mkdir(path)
        print("====new folder====")
        print("========ok========")
    else:
        print("----There is this folder! ----")


mkdir("pic_save")

detection_model_path = 'haarcascade_frontalface_default.xml'
emotion_model_path = 'fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_labels = get_labels('fer2013')

# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
# input_shape is (batch, height, width, channels); [1:3] picks out (height, width)
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
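To spell out what the slice above returns: for a channels-last Keras model, input_shape is (batch, height, width, channels), so [1:3] selects the spatial size the classifier expects. A minimal sketch (the 64x64 input is an assumption; the real value comes from the loaded mini_XCEPTION model):

from keras.models import Sequential
from keras.layers import Conv2D

# toy model standing in for the emotion classifier
toy_classifier = Sequential([Conv2D(8, 3, input_shape=(64, 64, 1))])
print(toy_classifier.input_shape)        # (None, 64, 64, 1)
print(toy_classifier.input_shape[1:3])   # (64, 64) -> target size used for resizing faces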
Code example #3
is_EPGA = True

image_name = '3.jpg'
main_path = 'test-images/'
text_left = -20
text_top = -20
text_gap = 20
font_size = 0.6
font_thickness = 2

image_path = main_path + image_name
stored_path = main_path + '{}-{}_'.format(is_light_net,
                                          is_EGA) + 'result_' + image_name

if is_EGA:
    emotion_labels = get_labels('ferplus')
else:
    emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
age_labels = get_labels('adience')
pose_labels = get_labels('aflw')

if is_single_task:
    if is_light_net:
        MODEL = 'mini_xception'
        # emotion_path='train_weights/EmotionNetminixception/expw/freezing_true-drouout_false_1__06-0.56.hdf5'
        # emotion_path = 'train_weights/EmotionNetminixception/fer2013/fer2013mini_XCEPTION.95-0.66.hdf5'
        # gender_age_path=''
        # gender_path='train_weights/GenderNetminixception/adience/freezing_true-drouout_false_6__12-0.86.hdf5'
        # gender_path = 'train_weights/GenderNetminixception/adience/simple_CNN.81-0.96.hdf5'
        # gender_path='train_weights/GenderNetminixception/imdb/freezing_true-drouout_false_1__05-0.95.hdf5'
Code example #4
def clicked():
    label = tk.Label(root)
    # load the file path
    image_path = askopenfilename()
    print(image_path)
    # open the image and show a 200x200 preview in the window
    preview = Image.open(image_path)
    preview = preview.resize((200, 200), Image.ANTIALIAS)
    label.photo = ImageTk.PhotoImage(preview)  # keep a reference so it is not garbage collected
    label['image'] = label.photo
    label.pack()

    detection_model_path = '/home/akanksha/code/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '/home/akanksha/code/trained_models/emotion_models/fer2013_mini_XCEPTION.86-0.43.hdf5'
    emotion_labels = get_labels('fer2013')

    # loading models
    face_detection = cv2.CascadeClassifier(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    pil_image = image.load_img(image_path, grayscale=True)
    gray_image = image.img_to_array(pil_image)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = face_detection.detectMultiScale(gray_image, 1.3, 5)
    if len(faces) == 0:
        print("Please enter a valid image!")
        z = Label(root, text='Please enter a valid image!', background="white")
        z.pack()

    var = 0
    for (x, y, w, h) in faces:
        # rows are indexed by y (height), columns by x (width)
        gray_face = gray_image[y:y + h, x:x + w]
        gray_face = cv2.resize(gray_face, emotion_target_size)
        gray_face = preprocess_input(gray_face)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_proba = emotion_classifier.predict(gray_face)
        print("------------------------------------------------------------------------")
        print("Probabilities of each class:")
        print(" 0:angry , 1:disgust , 2:fear , 3:happy , 4:sad , 5:surprise , 6:neutral ")
        print(emotion_proba)
        emotion_label_arg = np.argmax(emotion_proba)
        emotion_text = emotion_labels[emotion_label_arg]
        print("-------------------------------------------------------------------------")
        print("Emotion class:")
        print(emotion_text)
        print("--------------------------------------------------------------------------")
        var = np.amax(emotion_proba)
        print("Maximum probability emotion:")
        print(var)
        c = Label(root, text='MAXIMUM PROBABILITY EMOTION: ', background="white")
        c.pack()
        d = Label(root, text=var, background="white")
        d.pack()
        f = Label(root, text="EMOTION LABEL: ", background="white")
        f.pack()
        e = Label(root, text=emotion_text, background="white")
        e.pack()

        # map the winning probability (as a percentage) to a stress value
        var = var * 100
        if emotion_text == "angry":
            S = 2.36 * (math.log(0.33 * var + 1.00))
        elif emotion_text == "disgust":
            S = 7.27 * (math.log(0.01 * var + 1.02))
        elif emotion_text == "fear":
            S = 1.76 * (math.log(1.36 * var + 1.00))
        elif emotion_text == "happy":
            S = -7.56 * (math.log(-0.003 * var + 1.01))
        elif emotion_text == "sad":
            S = 2.85 * (math.log(0.13 * var + 1.01))
        elif emotion_text == "surprise":
            S = 2.45 * (math.log(0.29 * var + 1.00))
        else:  # neutral
            S = 5.05 * (math.log(0.015 * var + 1.016))

        print("Stress Value:")
        print(S)
        a = Label(root, text='STRESS VALUE(range:0-9): ', background="white")
        a.pack()
        b = Label(root, text=S, background="white")
        b.pack()
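The chain of elif branches above reads more easily as a lookup table; this sketch restates the same coefficients from the snippet (the helper name and the example call are mine, not the author's):

import math

# (a, b, c) coefficients per emotion, copied from the branches above: S = a * ln(b * var + c)
STRESS_COEFFS = {
    'angry':    (2.36, 0.33, 1.00),
    'disgust':  (7.27, 0.01, 1.02),
    'fear':     (1.76, 1.36, 1.00),
    'happy':    (-7.56, -0.003, 1.01),
    'sad':      (2.85, 0.13, 1.01),
    'surprise': (2.45, 0.29, 1.00),
    'neutral':  (5.05, 0.015, 1.016),
}

def stress_score(emotion_text, probability_percent):
    a, b, c = STRESS_COEFFS[emotion_text]
    return a * math.log(b * probability_percent + c)

print(stress_score('happy', 90.0))   # roughly 2.3 on the 0-9 scale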
Code example #5
            axis_array[row_arg, col_arg].axis('off')
            axis_array[row_arg, col_arg].imshow(image, cmap=cmap)
            axis_array[row_arg, col_arg].set_title(titles[image_arg])
            image_arg = image_arg + 1
    plt.tight_layout()

if __name__ == '__main__':
    # from utils.data_manager import DataManager
    from datasets import get_labels
    from keras.models import load_model
    import pickle

    # dataset_name = 'fer2013'
    # model_path = '../trained_models/emotion_models/simple_CNN.985-0.66.hdf5'
    dataset_name = 'fer2013'
    class_decoder = get_labels(dataset_name)
    # data_manager = DataManager(dataset_name)
    # face, emotions = data_manager.get_data()
    faces = pickle.load(open('face.pkl', 'rb'))
    emotions = pickle.load(open('emotion.pkl', 'rb'))
    pretty_imshow(plt.gca(), make_mosaic(faces[:4], 2, 2), cmap='gray')
    plt.show()

    """
    image_arg = 0
    face = faces[image_arg:image_arg + 1]
    emotion = emotions[image_arg:image_arg + 1]
    display_image(face, emotion, class_decoder)
    plt.show()
    
    normal_imshow(plt.gca(), make_mosaic(faces[:4], 3, 3), cmap='gray')
Code example #6
from utils import extract_left_eye_center, extract_right_eye_center, get_rotation_matrix, crop_image
import importlib, cv2, json, os.path, random, glob, multiprocessing, argparse, fcntl, imghdr, dlib, re

#aligner constants
scale = 1
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("/home/gc1569/Image_collector/face_classification/"\
                                     "src/shape_predictor_68_face_landmarks.dat")
#gender prediction constants
detection_model_path = '/home/gc1569/Image_collector/face_classification'\
'/trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '/home/gc1569/Image_collector/face_classification'\
'/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
gender_model_path = '/home/gc1569/Image_collector/face_classification'\
'/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
#loading models
face_detection = load_detection_model(detection_model_path)
gender_classifier = load_model(gender_model_path, compile=False)
#getting input model shapes for inference
gender_target_size = gender_classifier.input_shape[1:3]
#hyper-parameters for bounding boxes shape
gender_offsets = (30, 60)
gender_offsets = (10, 10)


# crops images to the face (into separate images if there are multiple faces) and outputs the landmark features to a text file
def align_face(name):
    input_image, output_image = source + name, destination + name
Code example #7
def emotion():
    # parameters for loading data and images
    emotion_text = ""
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        if emotion_text:
            return emotion_text

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
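The frame_window / emotion_window pair above is a sliding-window mode filter over the per-frame predictions; here is a standalone sketch of that smoothing step (the labels and window size are made up):

from statistics import mode

frame_window = 10
emotion_window = []

for emotion_text in ['happy', 'happy', 'neutral', 'happy', 'sad']:  # fake per-frame labels
    emotion_window.append(emotion_text)
    if len(emotion_window) > frame_window:
        emotion_window.pop(0)
    print(mode(emotion_window))  # the smoothed label that gets drawn on the frame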
Code example #8
    if pvalue < 0.05:
        # rejecting the null hypothesis that there is no
        # difference between the classifiers
        rate_matrix = rate_matrix.melt(var_name='groups', value_name='values')
        nemenyi_results = posthoc_nemenyi(rate_matrix, val_col='values', group_col='groups')
        print('nemenyi test')
        for i in range(0, nemenyi_results.shape[0]):
            print(nemenyi_results.iloc[i])


data_set_classes = ['BRICKFACE', 'SKY', 'FOLIAGE', 'CEMENT', 'WINDOW', 'PATH', 'GRASS']
training_set = datasets.read_set('segmentation.test')
training_set = datasets.normalize_data_set(training_set)
training_shape_view, training_rgb_view = datasets.split_views(training_set)
data_set_labels = datasets.get_labels(training_set)

# dropping columns 3, 4 and 5 from the original training set and from the shape view,
# since they all have very low variance (var -> 0)
training_set = training_set.drop(columns=['REGION-PIXEL-COUNT',
                                          'SHORT-LINE-DENSITY-5',
                                          'SHORT-LINE-DENSITY-2'])
training_shape_view = training_shape_view.drop(columns=['REGION-PIXEL-COUNT',
                                                        'SHORT-LINE-DENSITY-5',
                                                        'SHORT-LINE-DENSITY-2'])

# best k values obtained for the training set and the views;
# these were found by cross-validation (validate_k in knn.py)
training_set_k = 15
shape_view_k = 19
rgb_view_k = 15
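For context on the pvalue check and posthoc_nemenyi call above: the usual pipeline is a Friedman test across the classifiers, followed by the Nemenyi post-hoc test when the null hypothesis is rejected. A small self-contained sketch with invented accuracy rates, assuming scipy and scikit-posthocs are installed:

import pandas as pd
from scipy.stats import friedmanchisquare
from scikit_posthocs import posthoc_nemenyi

# one column per classifier, one row per cross-validation fold (invented numbers)
rate_matrix = pd.DataFrame({
    'knn_shape_view': [0.81, 0.79, 0.83, 0.80, 0.82],
    'knn_rgb_view':   [0.85, 0.86, 0.84, 0.87, 0.85],
    'bayes_combined': [0.78, 0.77, 0.80, 0.79, 0.78],
})

_, pvalue = friedmanchisquare(*(rate_matrix[col] for col in rate_matrix))
if pvalue < 0.05:
    melted = rate_matrix.melt(var_name='groups', value_name='values')
    print(posthoc_nemenyi(melted, val_col='values', group_col='groups'))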
Code example #9
            axis_array[row_arg, col_arg].imshow(image, cmap=cmap)
            axis_array[row_arg, col_arg].set_title(titles[image_arg])
            image_arg = image_arg + 1
    plt.tight_layout()


if __name__ == '__main__':
    #from utils.data_manager import DataManager
    from datasets import get_labels
    from keras.models import load_model
    import pickle

    #dataset_name = 'fer2013'
    #model_path = '../trained_models/emotion_models/simple_CNN.985-0.66.hdf5'
    dataset_name = 'fer2018'
    class_decoder = get_labels(dataset_name)
    #data_manager = DataManager(dataset_name)
    #faces, emotions = data_manager.get_data()
    faces = pickle.load(open('faces.pkl', 'rb'))
    emotions = pickle.load(open('emotions.pkl', 'rb'))
    pretty_imshow(plt.gca(), make_mosaic(faces[:4], 2, 2), cmap='gray')
    plt.show()
    """
    image_arg = 0
    face = faces[image_arg:image_arg + 1]
    emotion = emotions[image_arg:image_arg + 1]
    display_image(face, emotion, class_decoder)
    plt.show()

    normal_imshow(plt.gca(), make_mosaic(faces[:4], 3, 3), cmap='gray')
    plt.show()