Code example #1
import time
import logging

import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm

# NOTE: the import paths for the model classes below are assumptions;
# adjust them to match this project's actual package layout.
from model.models import AgenderNetInceptionV3, AgenderNetMobileNetV2, SSRNet
from SSRNET_model import SSR_net, SSR_net_general

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    logger.info('Load InceptionV3 model')
    inceptionv3 = AgenderNetInceptionV3()
    inceptionv3.setWeight('trainweight/inceptionv3_2/model.16-3.7887-0.9004-6.6744.h5')
    
    logger.info('Load MobileNetV2 model')
    mobilenetv2 = AgenderNetMobileNetV2()
    mobilenetv2.setWeight('trainweight/mobilenetv2/model.10-3.8290-0.8965-6.9498.h5')

    logger.info('Load SSRNet model')
    ssrnet = SSRNet(64, [3, 3, 3], 1.0, 1.0)
    ssrnet.setWeight('trainweight/ssrnet/model.37-7.3318-0.8643-7.1952.h5')

    logger.info('Load pretrained imdb model')
    imdb_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model.load_weights("tes_ssrnet/imdb_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    imdb_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    imdb_model_gender.load_weights("tes_ssrnet/imdb_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrained wiki model')
    wiki_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model.load_weights("tes_ssrnet/wiki_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    wiki_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    wiki_model_gender.load_weights("tes_ssrnet/wiki_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    logger.info('Load pretrained morph model')
    morph_model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
    morph_model.load_weights("tes_ssrnet/morph_age_ssrnet_3_3_3_64_1.0_1.0.h5")
    
    morph_model_gender = SSR_net_general(64, [3, 3, 3], 1.0, 1.0)()
    morph_model_gender.load_weights("tes_ssrnet/morph_gender_ssrnet_3_3_3_64_1.0_1.0.h5")

    utk = pd.read_csv('dataset/UTKface.csv')
    fgnet = pd.read_csv('dataset/FGNET.csv')

    utk_paths = utk['full_path'].values
    fgnet_paths = fgnet['full_path'].values

    logger.info('Read UTKface aligned images')
    utk_images = [cv2.imread('UTKface_aligned/'+path) for path in tqdm(utk_paths)]

    logger.info('Read FGNET aligned images')
    fgnet_images = [cv2.imread('FGNET_aligned/'+path) for path in tqdm(fgnet_paths)]
    
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    utk_pred_age = dict()
    utk_pred_gender = dict()
    fgnet_pred_age = dict()

    logger.info('Predict with InceptionV3')
    start = time.time()
    utk_pred_gender['inceptionv3'], utk_pred_age['inceptionv3'] = get_result(inceptionv3, utk_X)
    _, fgnet_pred_age['inceptionv3'] = get_result(inceptionv3, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    del utk_X, fgnet_X
    logger.info('Resize image to 96 for MobileNetV2')
    utk_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)
    
    logger.info('Predict with MobileNetV2')
    start = time.time()
    utk_pred_gender['mobilenetv2'], utk_pred_age['mobilenetv2'] = get_result(mobilenetv2, utk_X)
    _, fgnet_pred_age['mobilenetv2'] = get_result(mobilenetv2, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    del utk_X, fgnet_X
    logger.info('Resize image to 64 for SSR-Net')
    utk_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(utk_images)]
    fgnet_images = [cv2.resize(image, (64, 64), interpolation=cv2.INTER_CUBIC) for image in tqdm(fgnet_images)]
    utk_X = np.array(utk_images)
    fgnet_X = np.array(fgnet_images)

    logger.info('Predict with SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet'], utk_pred_age['ssrnet'] = get_result(ssrnet, utk_X)
    _, fgnet_pred_age['ssrnet'] = get_result(ssrnet, fgnet_X)
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with IMDB_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-imdb'] = np.around(imdb_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-imdb'] = imdb_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-imdb'] = imdb_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Wiki_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-wiki'] = np.around(wiki_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-wiki'] = wiki_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-wiki'] = wiki_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    logger.info('Predict with Morph_SSR-Net')
    start = time.time()
    utk_pred_gender['ssrnet-morph'] = np.around(morph_model_gender.predict(utk_X).squeeze()).astype('int')
    utk_pred_age['ssrnet-morph'] = morph_model.predict(utk_X).squeeze()
    fgnet_pred_age['ssrnet-morph'] = morph_model.predict(fgnet_X).squeeze()
    elapsed = time.time() - start
    logger.info('Time elapsed {:.2f} sec'.format(elapsed))

    utk_pred_age = pd.DataFrame.from_dict(utk_pred_age)
    utk_pred_gender = pd.DataFrame.from_dict(utk_pred_gender)
    fgnet_pred_age = pd.DataFrame.from_dict(fgnet_pred_age)

    utk_pred_age = pd.concat([utk['age'], utk_pred_age], axis=1)
    utk_pred_gender = pd.concat([utk['gender'], utk_pred_gender], axis=1)
    fgnet_pred_age = pd.concat([fgnet['age'], fgnet_pred_age], axis=1)

    utk_pred_age.to_csv('result/utk_age_prediction.csv', index=False)
    utk_pred_gender.to_csv('result/utk_gender_prediction.csv', index=False)
    fgnet_pred_age.to_csv('result/fgnet_age_prediction.csv', index=False)
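
The script above also calls a get_result helper that is not part of this snippet. A minimal sketch of the assumed contract, based only on how its return values are unpacked above (genders first, then ages), is shown below; prep_image and decode_prediction are assumed model methods, not confirmed API, so substitute the project's real preprocessing and decoding.

def get_result(model, X):
    """Hypothetical helper: preprocess a batch of face images, run the
    given model, and return (predicted genders, predicted ages)."""
    # prep_image / decode_prediction are assumptions about the model
    # classes; replace with the project's actual preprocessing/decoding.
    X = model.prep_image(X)
    prediction = model.predict(X)
    genders, ages = model.decode_prediction(prediction)
    return genders, ages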
Code example #2
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except:
            continue

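        # Crop the detected face from the RGB frame, resize it to the
        # age/gender model's input size, and min-max normalize to [0, 255].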
        faces_2 = np.empty((1, img_size, img_size, 3))
        faces_2[0, :, :, :] = cv2.resize(rgb_image[y1:y2 + 1, x1:x2 + 1, :],
                                         (img_size, img_size))
        faces_2[0, :, :, :] = cv2.normalize(faces_2[0, :, :, :],
                                            None,
                                            alpha=0,
                                            beta=255,
                                            norm_type=cv2.NORM_MINMAX)

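        # Run the age and gender models on the prepared face crop.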
        predicted_ages = model.predict(faces_2)
        predicted_genders = model_gender.predict(faces_2)
        print(predicted_ages)

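        # Preprocess the grayscale crop and add batch and channel axes
        # before running the emotion classifier.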
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

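        # Keep a sliding window of recent emotion predictions and smooth
        # the result by taking the window's mode.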
        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)