Code example #1
        try:
            if not is_light_net:
                # RGB model input: resize, normalize, then add a batch dimension -> (1, H, W, 3)
                face = cv2.resize(rgb_face, target_size, interpolation=cv2.INTER_CUBIC)
                face = preprocess_input(face, False)
                face = np.expand_dims(face, 0)
            else:
                # Lightweight model input: grayscale, so add batch and channel dimensions -> (1, H, W, 1)
                face = cv2.resize(gray_face, target_size, interpolation=cv2.INTER_CUBIC)
                face = preprocess_input(face, False)
                face = np.expand_dims(face, 0)
                face = np.expand_dims(face, -1)
        except Exception:
            # Skip face crops that cannot be resized (e.g. empty regions)
            continue

        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
        if is_single_task:
            emotion_label = emotion_model.predict(face)
            gender_label = gender_model.predict(face)
            pose_label = pose_model.predict(face)
            age_label = age_model.predict(face)
        else:
            if is_EGA:
                emotion_label, gender_label, age_label = EGA_multi_model.predict(face)
            elif is_EPGA:
                emotion_label, pose_label, gender_label, age_label = EPGA_multi_model.predict(face)
                print('predicted result')
            else:
                emotion_label, pose_label, age_label = EPA_multi_model.predict(face)
        end_time = time.perf_counter()
        print('spend_time:', end_time - start_time)

        if is_single_task:
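The two branches above produce different input shapes: the RGB path yields a (1, H, W, 3) batch, while the light-net path adds a trailing channel axis to get (1, H, W, 1). A minimal self-contained sketch of that preprocessing step is shown below; the normalization is an assumption (a simple scale to [-1, 1]), since preprocess_input is defined elsewhere in the project.

import cv2
import numpy as np

def prepare_face(face_img, target_size, grayscale=False):
    """Resize a face crop and add the batch (and channel) axes expected by model.predict()."""
    face = cv2.resize(face_img, target_size, interpolation=cv2.INTER_CUBIC)
    face = face.astype('float32') / 127.5 - 1.0  # assumed normalization to [-1, 1]
    face = np.expand_dims(face, 0)               # batch axis: (H, W, C) -> (1, H, W, C)
    if grayscale:
        face = np.expand_dims(face, -1)          # channel axis: (1, H, W) -> (1, H, W, 1)
    return face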
Code example #2
                              interpolation=cv2.INTER_CUBIC)
            face = preprocess_input(face, False)
            face = np.expand_dims(face, 0)
            face = np.expand_dims(face, -1)
    except Exception:
        # Skip face crops that cannot be resized
        continue

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    if is_single_task:
        emotion_label = emotion_model.predict(face)
        gender_label = gender_model.predict(face)
        pose_label = pose_model.predict(face)
        age_label = age_model.predict(face)
    else:
        if is_EGA:
            emotion_label, gender_label, age_label = EGA_multi_model.predict(face)
        elif is_EPA:
            emotion_label, pose_label, age_label = EPA_multi_model.predict(face)
        elif is_EPGA:
            print('here')
            emotion_label, pose_label, gender_label, age_label = EPGA_multi_model.predict(face)
    end_time = time.perf_counter()
    print('spend_time:', end_time - start_time)

    if is_single_task:
        # Each model head outputs class probabilities; argmax gives the predicted class index
        emotion_label_arg = np.argmax(emotion_label)
        gender_label_arg = np.argmax(gender_label)
        age_label_arg = np.argmax(age_label)
        pose_label_arg = np.argmax(pose_label)
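For reference, a minimal sketch of how the argmax indices above could be mapped to readable strings. The label tables here are hypothetical placeholders; the real class order comes from each project's training labels.

import numpy as np

# Hypothetical label tables -- the actual class order is defined by the training data.
EMOTION_LABELS = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}
GENDER_LABELS = {0: 'female', 1: 'male'}

def decode_prediction(probabilities, label_table):
    """Return the human-readable label for the highest-probability class."""
    index = int(np.argmax(probabilities))
    return label_table.get(index, 'unknown')

# e.g. decode_prediction(emotion_label, EMOTION_LABELS)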
Code example #3
File: main.py  Project: jasonchan117/LeopardAnkle
}

if __name__ == '__main__':

    # tf.keras.layers.Conv2D()  # commented out: Conv2D requires filters and kernel_size, so calling it with no arguments raises a TypeError
    data_x, data_y = loadData(img_path)
    #Classification
    x_train, y_train, x_val, y_val, x_test, y_test = prePrec(data_x, data_y)
    model_clas = Net('classification')
    model_clas = train(model_clas,
                       x_train,
                       y_train,
                       x_val,
                       y_val,
                       parameters=parameter_set['classification'])[0]
    print(model_clas.predict(x_test))
    print('Accuracy on testset:{}'.format(
        accuracy_score(y_test, model_clas.predict(x_test))))
    #Regression
    model_reg = Net('regression')
    model_reg = train(model_reg,
                      x_train,
                      y_train,
                      x_val,
                      y_val,
                      parameters=parameter_set['regression'])[0]
    mse = tf.keras.losses.MeanSquaredError()
    # Note: this loss is computed on the validation split, not the held-out test set
    loss = mse((y_val - 15) / 15., model_reg.predict(x_val).reshape(-1, ))
    print('MSE on validation set:{}'.format(loss))

    #######################MobileNet-v2############################
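The bare tf.keras.layers.Conv2D() call flagged above fails because filters and kernel_size have no default values. For reference, a minimal sketch of a valid Conv2D layer inside a small Keras model; the filter count, kernel size, and input shape are arbitrary illustration values.

import tensorflow as tf

# Minimal illustrative model; 32 filters, 3x3 kernels, and a 64x64 RGB input are arbitrary choices.
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(64, 64, 3)),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()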