Example #1
import dlib

detector = dlib.get_frontal_face_detector()

def print_detections(file):
    img = dlib.load_rgb_image(file)
    # The -1 lowers the detection threshold so low-confidence detections are scored too
    dets, scores, idx = detector.run(img, 1, -1)
    return scores
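# Usage sketch (hypothetical image path): scores align one-to-one with the
# rectangles in dets, and idx says which HOG sub-detector matched.
# print(print_detections("faces/example.jpg"))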
Example #2
#   Compiling dlib requires CMake, which on Ubuntu can be installed via the
#   command:
#       sudo apt-get install cmake
#
#   Also note that this example requires Numpy which can be installed
#   via the command:
#       pip install numpy

import sys
import dlib

detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time.  This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Finally, if you really want to you can ask the detector to tell you the score
# for each detection.  The score is bigger for more confident detections.
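# A sketch of that score query using dlib's detector.run() API; a negative
# third argument lowers the threshold to surface weaker detections as well.
if len(sys.argv[1:]) > 0:
    img = dlib.load_rgb_image(sys.argv[1])
    dets, scores, idx = detector.run(img, 1, -0.5)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type: {}".format(
            d, scores[i], idx[i]))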
Example #3
import dlib
from scipy.spatial.distance import euclidean
import pickle

# Set the model paths
shape_predictor_path = 'shape_predictor_5_face_landmarks.dat'
face_rec_model_path = 'dlib_face_recognition_resnet_model_v1.dat'

obama_vectors = pickle.load(open("obama.p", "rb"))
# Set the detector and shape predictor functions
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(shape_predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

# Load the image you want to recognize
img = dlib.load_rgb_image('obama.jpg')

dets = detector(img, 1)

for k, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
    # Get the landmarks/parts for the face in box d.
    shape = sp(img, d)
    face_descriptor = facerec.compute_face_descriptor(img, shape)
    #print(face_descriptor)
    distance = euclidean(face_descriptor, obama_vectors)
    print(
        'Distance between pre-computed facial vectors and picture we give is: '
        + str(distance))
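# Hedged sketch of how a reference file like "obama.p" could have been
# precomputed (the reference image name here is an assumption):
# ref_img = dlib.load_rgb_image('obama_reference.jpg')
# ref_shape = sp(ref_img, detector(ref_img, 1)[0])
# ref_descriptor = facerec.compute_face_descriptor(ref_img, ref_shape)
# pickle.dump(list(ref_descriptor), open("obama.p", "wb"))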
Example #4
import os

import dlib
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms


def predidct_age_gender_race(save_prediction_at, imgs_path='cropped_faces/'):
    img_names = [os.path.join(imgs_path, x) for x in os.listdir(imgs_path)]
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # model_fair_7 = torchvision.models.resnet34(pretrained=True)
    # model_fair_7.fc = nn.Linear(model_fair_7.fc.in_features, 18)
    # model_fair_7.load_state_dict(torch.load('fair_face_models/fairface_alldata_20191111.pt'))
    # model_fair_7 = model_fair_7.to(device)
    # model_fair_7.eval()

    model_fair_4 = torchvision.models.resnet34(pretrained=True)
    model_fair_4.fc = nn.Linear(model_fair_4.fc.in_features, 18)
    model_fair_4.load_state_dict(torch.load('fair_face_models/fairface_alldata_4race_20191111.pt'))
    model_fair_4 = model_fair_4.to(device)
    model_fair_4.eval()

    trans = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    # img pth of face images
    face_names = []
    # list within a list. Each sublist contains scores for all races. Take max for predicted race
    race_scores_fair = []
    gender_scores_fair = []
    age_scores_fair = []
    race_preds_fair = []
    gender_preds_fair = []
    age_preds_fair = []
    race_scores_fair_4 = []
    race_preds_fair_4 = []

    for index, img_name in enumerate(img_names):
        if index % 1000 == 0:
            print("Predicting... {}/{}".format(index, len(img_names)))

        face_names.append(img_name)
        image = dlib.load_rgb_image(img_name)
        image = trans(image)
        image = image.view(1, 3, 224, 224)  # reshape image to match model dimensions (1 batch size)
        image = image.to(device)

        # # fair
        # outputs = model_fair_7(image)
        # outputs = outputs.cpu().detach().numpy()
        # outputs = np.squeeze(outputs)

        # race_outputs = outputs[:7]
        # gender_outputs = outputs[7:9]
        # age_outputs = outputs[9:18]

        # race_score = np.exp(race_outputs) / np.sum(np.exp(race_outputs))
        # gender_score = np.exp(gender_outputs) / np.sum(np.exp(gender_outputs))
        # age_score = np.exp(age_outputs) / np.sum(np.exp(age_outputs))

        # race_pred = np.argmax(race_score)
        # gender_pred = np.argmax(gender_score)
        # age_pred = np.argmax(age_score)

        # race_scores_fair.append(race_score)
        # gender_scores_fair.append(gender_score)
        # age_scores_fair.append(age_score)

        # race_preds_fair.append(race_pred)
        # gender_preds_fair.append(gender_pred)
        # age_preds_fair.append(age_pred)

        # fair 4 class
        outputs = model_fair_4(image)
        outputs = outputs.cpu().detach().numpy()
        outputs = np.squeeze(outputs)

        race_outputs = outputs[:4]
        race_score = np.exp(race_outputs) / np.sum(np.exp(race_outputs))
        race_pred = np.argmax(race_score)

        race_scores_fair_4.append(race_score)
        race_preds_fair_4.append(race_pred)

    # Note: the 7-class model above is commented out, so race_preds_fair,
    # gender_preds_fair and age_preds_fair stay empty and their columns (and
    # the derived race/gender/age labels below) come out empty in the CSV.
    result = pd.DataFrame([face_names,
                           race_preds_fair,
                           race_preds_fair_4,
                           gender_preds_fair,
                           age_preds_fair,
                           race_scores_fair, race_scores_fair_4,
                           gender_scores_fair,
                           age_scores_fair, ]).T
    result.columns = ['face_name_align',
                      'race_preds_fair',
                      'race_preds_fair_4',
                      'gender_preds_fair',
                      'age_preds_fair',
                      'race_scores_fair',
                      'race_scores_fair_4',
                      'gender_scores_fair',
                      'age_scores_fair']
    result.loc[result['race_preds_fair'] == 0, 'race'] = 'White'
    result.loc[result['race_preds_fair'] == 1, 'race'] = 'Black'
    result.loc[result['race_preds_fair'] == 2, 'race'] = 'Latino_Hispanic'
    result.loc[result['race_preds_fair'] == 3, 'race'] = 'East Asian'
    result.loc[result['race_preds_fair'] == 4, 'race'] = 'Southeast Asian'
    result.loc[result['race_preds_fair'] == 5, 'race'] = 'Indian'
    result.loc[result['race_preds_fair'] == 6, 'race'] = 'Middle Eastern'

    # race fair 4

    result.loc[result['race_preds_fair_4'] == 0, 'race4'] = 'White'
    result.loc[result['race_preds_fair_4'] == 1, 'race4'] = 'Black'
    result.loc[result['race_preds_fair_4'] == 2, 'race4'] = 'Asian'
    result.loc[result['race_preds_fair_4'] == 3, 'race4'] = 'Indian'

    # gender
    result.loc[result['gender_preds_fair'] == 0, 'gender'] = 'Male'
    result.loc[result['gender_preds_fair'] == 1, 'gender'] = 'Female'

    # age
    result.loc[result['age_preds_fair'] == 0, 'age'] = '0-2'
    result.loc[result['age_preds_fair'] == 1, 'age'] = '3-9'
    result.loc[result['age_preds_fair'] == 2, 'age'] = '10-19'
    result.loc[result['age_preds_fair'] == 3, 'age'] = '20-29'
    result.loc[result['age_preds_fair'] == 4, 'age'] = '30-39'
    result.loc[result['age_preds_fair'] == 5, 'age'] = '40-49'
    result.loc[result['age_preds_fair'] == 6, 'age'] = '50-59'
    result.loc[result['age_preds_fair'] == 7, 'age'] = '60-69'
    result.loc[result['age_preds_fair'] == 8, 'age'] = '70+'

    result[['face_name_align',
            'race', 'race4',
            'gender', 'age',
            'race_scores_fair', 'race_scores_fair_4',
            'gender_scores_fair', 'age_scores_fair']].to_csv(save_prediction_at, index=False)

    print("saved results at ", save_prediction_at)
Example #5
    # TestOptions, def_models, detect_and_align_faces, save_imgs and
    # enhance_faces are helpers from the surrounding project.
    opt = TestOptions().parse()
    #  face_detector = dlib.get_frontal_face_detector()
    face_detector = dlib.cnn_face_detection_model_v1(
        './pretrain_models/mmod_human_face_detector.dat')
    lmk_predictor = dlib.shape_predictor(
        './pretrain_models/shape_predictor_5_face_landmarks.dat')
    template_path = './pretrain_models/FFHQ_template.npy'
    enhance_model = def_models(opt)

    for img_name in os.listdir(opt.src_dir):
        img_path = os.path.join(opt.src_dir, img_name)
        save_current_dir = os.path.join(opt.results_dir,
                                        os.path.splitext(img_name)[0])
        os.makedirs(save_current_dir, exist_ok=True)
        print('======> Loading image', img_path)
        img = dlib.load_rgb_image(img_path)
        aligned_faces, tform_params = detect_and_align_faces(
            img, face_detector, lmk_predictor, template_path)
        # Save aligned LQ faces
        save_lq_dir = os.path.join(save_current_dir, 'LQ_faces')
        os.makedirs(save_lq_dir, exist_ok=True)
        print('======> Saving aligned LQ faces to', save_lq_dir)
        save_imgs(aligned_faces, save_lq_dir)

        hq_faces, lq_parse_maps, back_map = enhance_faces(
            aligned_faces, enhance_model)
        # Save LQ parsing maps and enhanced faces
        save_parse_dir = os.path.join(save_current_dir, 'ParseMaps')
        save_hq_dir = os.path.join(save_current_dir, 'HQ')
        os.makedirs(save_parse_dir, exist_ok=True)
        os.makedirs(save_hq_dir, exist_ok=True)
Example #6
from socket import timeout
import psycopg2
import cv2
import dlib
from psycopg2.extras import execute_values
import os
from FaceRecognitionFunctions import *



name = "Harbhajan_Singh"
# img = dlib.load_rgb_image(work_dir + name + '/' + name + '_0001.jpg')
img = dlib.load_rgb_image("/tmp/harbajan.jpg")
face_desc = get_face_embedding(img)
face_emb = vec2list(face_desc)
print(retrieve(face_emb))
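# Hypothetical minimal versions of the helpers imported from
# FaceRecognitionFunctions above (assumptions, shown only for context):
# def get_face_embedding(img):
#     det = detector(img, 1)[0]
#     return facerec.compute_face_descriptor(img, sp(img, det))
# def vec2list(face_desc):
#     return list(face_desc)  # a plain list binds cleanly as a psycopg2 parameter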
Example #7
import glob
import os
import pickle
import random
import shutil
import uuid

import cv2
import dlib
from scipy.spatial import distance
from skimage.draw import polygon_perimeter  # assumed source of polygon_perimeter


def unlock_screen():
    base_folder = "./"
    path = os.path.abspath(base_folder)
    abs_path = path + "/"
    # Check whether the "Description_dataset" and "dataset" folders exist
    if os.path.exists(abs_path + "Description_dataset"):
        shutil.rmtree(abs_path + "Description_dataset", ignore_errors=True)
    if os.path.exists(abs_path + "dataset"):
        shutil.rmtree(abs_path + "dataset", ignore_errors=True)
    # Connect to the camera
    cam = cv2.VideoCapture(
        "/dev/video0")  # If the device path differs, CHANGE IT HERE!!!!
    cam.set(3, 640)  # set the video width
    cam.set(4, 480)  # set the video height
    face_detector = cv2.CascadeClassifier(
        abs_path + 'haarcascade_frontalface_default.xml'
    )  # path to the cascade; we use the Haar face-detection cascade

    face_id = str(uuid.uuid4())
    # Initialize the face sample counter
    count = 0
    while True:
        ret, img = cam.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray,
                                               scaleFactor=1.2,
                                               minNeighbors=5,
                                               minSize=(20, 20))

        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            count += 1
            # Save the captured face image into the dataset folder,
            # creating the folder first if needed
            folder = "./dataset"
            if not os.path.exists(folder):
                os.makedirs(folder)
            cv2.imwrite(
                folder + "/" + str(face_id) + '_' + str(count) + ".jpg",
                gray[y:y + h, x:x + w])

        k = cv2.waitKey(100) & 0xff  # press "ESC" to exit the video
        if k == 27:
            break
        elif count >= 3:  # take 3 face samples, then stop the video
            break

    cam.release()
    cv2.destroyAllWindows()

    # Extract the face descriptors
    faces_folder_path = abs_path + "dataset"  # folder where we look for .jpg files
    detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(abs_path +
                              'shape_predictor_68_face_landmarks.dat')
    facerec = dlib.face_recognition_model_v1(
        abs_path + 'dlib_face_recognition_resnet_model_v1.dat')
    for f in glob.glob(os.path.join(faces_folder_path + "*/", "*.jpg")):
        img = dlib.load_rgb_image(f)
        dets = detector(img, 2)
        # Now process each face we found
        for k, d in enumerate(dets):
            # polygon_perimeter returns the perimeter coordinates; note that
            # the result is discarded here, so nothing is actually drawn
            polygon_perimeter(
                [d.top(), d.top(), d.bottom(),
                 d.bottom()],
                [d.right(), d.left(), d.left(),
                 d.right()])
            # find the unique landmark points on the face
            shape = sp(img, d)
            # Extract the face descriptor and store it in face_descriptor
            face_descriptor = facerec.compute_face_descriptor(img, shape)
            # Save the extracted descriptors into separate .pickle files
            filename = str(uuid.uuid4())  # unique file name via the uuid library
            # Create the Description_dataset folder to hold the .pickle files
            newpath = abs_path + "Description_dataset"
            if not os.path.exists(newpath):  # check whether it already exists
                os.makedirs(newpath)


# Extract the descriptors from the face and save them as a .pickle file in the Description folder
            with open(abs_path + "Description_dataset/" + filename + '.pickle',
                      'wb') as file_save:
                pickle.dump(
                    face_descriptor, file_save
                )  # pickle.dump saves the descriptors to a binary .pickle file

    find_file = os.listdir(abs_path +
                           "Description_database/")  # where we look for files
    find_file_1 = random.choice(find_file)  # pick one file at random
    face_rec_model_path = os.path.abspath(
        abs_path + "Description_database/" +
        find_file_1)  # get its absolute path
    find_file_2 = os.listdir(abs_path + "Description_dataset/")
    find_file_3 = random.choice(find_file_2)
    faces_folder_path = os.path.abspath(abs_path + "Description_dataset/" +
                                        find_file_3)
    # Compare the descriptors
    with open(face_rec_model_path, 'rb') as file_load:
        file_data_description_0 = pickle.load(
            file_load)  # load the stored descriptor from its binary .pickle file
    with open(faces_folder_path, 'rb') as file_load_1:
        file_data_description_1 = pickle.load(
            file_load_1)  # load the freshly captured descriptor
    a = distance.euclidean(
        file_data_description_0,
        file_data_description_1)  # Euclidean distance between the two face descriptors
    f_1 = round(a, 2)  # round the distance to two decimal places
    if 0 <= f_1 <= 0.45:  # if the distance between the two face descriptors is at most 0.45...
        os.system(
            "sudo loginctl unlock-sessions")  # unlock the lock screen
        # Remove the descriptors and the face photos
        shutil.rmtree(abs_path + "Description_dataset", ignore_errors=True)
        shutil.rmtree(abs_path + "dataset", ignore_errors=True)
    elif 0.46 <= f_1 <= 1:  # if the distance is between 0.46 and 1, the faces do not match
        shutil.rmtree(abs_path + "Description_dataset", ignore_errors=True)
        shutil.rmtree(abs_path + "dataset", ignore_errors=True)
Example #8
        "Call this program like this:\n"
        "   ./face_alignment.py shape_predictor_5_face_landmarks.dat ../examples/faces/bald_guys.jpg\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
    exit()

predictor_path = sys.argv[1]
face_file_path = sys.argv[2]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)

# Load the image using Dlib
img = dlib.load_rgb_image(face_file_path)

# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))
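# The original dlib face_alignment example continues from here by extracting
# and displaying the aligned face chips; a minimal sketch of that step:
window = dlib.image_window()
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
    window.set_image(image)
    dlib.hit_enter_to_continue()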
Example #9
import dlib
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np


# In[2]:


detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('./models/shape_predictor_5_face_landmarks.dat')


# In[3]:


img = dlib.load_rgb_image('./Beauty_GAN_imgs/02.jpg')
plt.figure(figsize=(16, 10))
plt.imshow(img)
plt.axis('off')
plt.show()


# In[4]:


img_result = img.copy()
dets = detector(img, 1)
if len(dets) == 0:
    print("can't find faces!")
else:
    fig, ax = plt.subplots(1, figsize=(16, 10))
Example #10
import cv2 as cv
import dlib
import h5py
import matplotlib.pyplot as plt
import numpy as np
import torch

# EnergyMin and the exercise_* / normalize_points helpers are project modules
# defined elsewhere.


def main():
    # Load data
    landmarks_model = np.loadtxt("Landmarks68_model2017-1_face12_nomouth.anl",
                                 dtype=int)
    bfm = h5py.File("Data/model2017-1_face12_nomouth.h5", 'r')
    triangles = np.asarray(bfm['shape/representer/cells'], dtype=np.int32).T
    mean_tex = np.asarray(bfm['color/model/mean'], dtype=np.float32).reshape(
        (-1, 3))

    # Mean shape and PCA data
    mean_shape = np.asarray(bfm['shape/model/mean'], dtype=np.float32).reshape(
        (-1, 3))
    mean_shape_land = mean_shape[landmarks_model]
    sigma2_shape = np.asarray(bfm['shape/model/pcaVariance'])[:30]
    basis_shape = np.asarray(bfm['shape/model/pcaBasis']).reshape(
        (-1, 3, 199))[:, :, :30]
    basis_shape_land = basis_shape[landmarks_model]

    mean_expr = np.asarray(bfm['expression/model/mean']).reshape((-1, 3))
    mean_expr_land = mean_expr[landmarks_model]
    sigma2_expr = np.asarray(bfm['expression/model/pcaVariance'])[:20]
    basis_expr = np.asarray(bfm['expression/model/pcaBasis']).reshape(
        (-1, 3, 100))[:, :, :20]
    basis_expr_land = basis_expr[landmarks_model]

    number_landmark = len(landmarks_model)
    number_whole_points = mean_shape.shape[0]

    do_4, do_6, do_7, do_4_eval = False, False, True, False

    # Images for exercise 4
    if do_4 or do_4_eval:
        img = dlib.load_rgb_image("faces/dan2.jpg")
        # img = dlib.load_rgb_image("faces/surprise.png")
        # img = dlib.load_rgb_image("faces/exercise_6/dave1.jpg")

    # For question 6
    if do_6:
        imgs = [
            dlib.load_rgb_image(f"faces/exercise_6/frame{i}.jpg")[:, :450, :]
            for i in range(1, 5)
        ]
        img = imgs[0]

    # Load video for exercise 7
    if do_7:
        video_filep = "faces/exercise_7/smile.mp4"
        video_cap = cv.VideoCapture(video_filep)
        _ret, img = video_cap.read()
        # TODO: Make this adaptive by getting the ground truth face landmarks!
        img = img[:, :450, :]
        video_cap.release()

    # Get shape of current input image
    im_shape = img.shape

    # Instantiate the model
    model = EnergyMin(mean_shape_land, sigma2_shape, basis_shape_land,
                      mean_expr_land, sigma2_expr, basis_expr_land, im_shape)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

    # Check initialized matrices
    print("P Matrix")
    print(model.P)
    print("V Matrix")
    print(model.V)

    # Landmark points
    points_land = mean_shape_land + mean_expr_land
    S_land = np.concatenate((points_land, np.ones((number_landmark, 1))),
                            axis=1)

    # Total points of 3D face model
    whole_points = mean_shape + mean_expr
    S_whole = np.concatenate((whole_points, np.ones((number_whole_points, 1))),
                             axis=1)

    # Normalize input for better convergence - Not used at the moment
    # points_land_norm, min_max_points_land = normalize_points(points_land)
    # S_land_norm = np.concatenate((points_land_norm, np.ones((number_landmark, 1))), axis=1)
    # whole_points_norm, min_max_w_points = normalize_points(whole_points, min_max_points_land)
    # S_whole_norm = np.concatenate((whole_points_norm, np.ones((number_whole_points, 1))), axis=1)

    face_model = basis_shape, basis_expr

    if do_4:
        loss_data = exercise_4_and_5(model, optimizer, img, S_land, S_whole,
                                     face_model, triangles,
                                     number_whole_points)
    if do_6:
        exercise_6(model, imgs, S_land, S_whole, face_model, triangles,
                   number_whole_points)
    if do_7:
        # BS is batch size for estimating α
        exercise7(model,
                  video_filep,
                  S_land,
                  S_whole,
                  face_model,
                  triangles,
                  number_whole_points,
                  bs=20)
    if do_4_eval:
        loss_neutral = exercise_4_and_5(model, optimizer, img, S_land, S_whole,
                                        face_model, triangles,
                                        number_whole_points)

        alpha_neutral = model.alpha.detach().numpy()
        delta_neutral = model.delta.detach().numpy()[0]

        img = dlib.load_rgb_image("faces/surprise.png")
        im_shape = img.shape
        # Instantiate the model
        model = EnergyMin(mean_shape_land, sigma2_shape, basis_shape_land,
                          mean_expr_land, sigma2_expr, basis_expr_land,
                          im_shape)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
        loss_surprised = exercise_4_and_5(model, optimizer, img, S_land,
                                          S_whole, face_model, triangles,
                                          number_whole_points)

        alpha_surprised = model.alpha.detach().numpy()
        delta_surprised = model.delta.detach().numpy()[0]

        fig = plt.figure(figsize=(10, 6), dpi=80)
        ax = fig.add_subplot(1, 1, 1)
        ax.set_title("Loss for neutral and surprised face model",
                     fontweight='bold',
                     fontsize=18)
        ax.set_xlabel('$Epoch$', fontsize=16)
        y_ax = ax.set_ylabel('$Energy$', fontsize=16)

        colors = ['b', 'g']

        neut = ax.plot(loss_neutral, 'b')
        sur = ax.plot(loss_surprised, 'g')

        plt.legend(handles=[neut[0], sur[0]],
                   labels=['Neutral face', 'Surprised face'],
                   prop={'size': 12})

        plt.show()

        fig = plt.figure(figsize=(10, 6), dpi=80)
        ax1 = fig.add_subplot(1, 4, 1)
        ax2 = fig.add_subplot(1, 4, 2)
        ax3 = fig.add_subplot(1, 4, 3)
        ax4 = fig.add_subplot(1, 4, 4)

        ax1.set_title(r'$\alpha$ neutral', fontweight='bold', fontsize=18)
        ax2.set_title(r'$\delta$ neutral', fontweight='bold', fontsize=18)
        ax3.set_title(r'$\alpha$ surprised', fontweight='bold', fontsize=18)
        ax4.set_title(r'$\delta$ surprised', fontweight='bold', fontsize=18)
        ax1.boxplot(alpha_neutral)
        ax2.boxplot(delta_neutral)
        ax3.boxplot(alpha_surprised)
        ax4.boxplot(delta_surprised)

        print(np.min(alpha_neutral))
        print(np.max(alpha_neutral))
        print(np.min(delta_neutral))
        print(np.max(delta_neutral))

        print(np.min(alpha_surprised))
        print(np.max(alpha_surprised))
        print(np.min(delta_surprised))
        print(np.max(delta_surprised))

        plt.show()
Example #11
import glob
import os

import dlib
import numpy as np
from PIL import Image, ImageDraw


def cg_lmk(img_path, lmk_path, predictor_path='data/lmk_predictor/shape_predictor_68_face_landmarks.dat'):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    print('predictor loaded')
    
    if 'webface' in img_path:
        data_path = os.path.join(img_path, '*', '*.jpg')
    else:
        data_path = os.path.join(img_path, '*.jpg')
    
    img_names = []
    lmks = []

    for f in glob.glob(data_path):
        lmk_f = f.replace(img_path, lmk_path)
        
        lmk_dir = os.sep.join(lmk_f.split(os.sep)[:-1])
        # print(lmk_dir)
        os.makedirs(lmk_dir, exist_ok=True)

        img = dlib.load_rgb_image(f)
        # Ask the detector to find the bounding boxes of each face. The 0 in the
        # second argument indicates that we should upsample the image 0 times.
        # Usually it's set to 1 to make everything bigger and allow us to detect
        # more faces.
        dets = detector(img, 0)

        if len(dets) != 1:
            print("Processing file: {}".format(f))
            print("Number of faces detected: {}".format(len(dets)))
            continue
        rec = [dets[0].left(), dets[0].top(), dets[0].right(), dets[0].bottom()]

        shape = predictor(img, dets[0])
        points = np.array([(p.x, p.y) for p in shape.parts()])

        move = np.zeros(points.shape)
        # move 16-68 down
        move[17:, 1] += int(positive_num(points[8, 1] - points[57, 1]) * 0.3)
        # move 37-48 apart and change 37,46
        move[36:42, 0] += -int(positive_num(points[42, 0] - points[39, 0]) * 0.1)
        move[42:48, 0] += int(positive_num(points[42, 0] - points[39, 0]) * 0.1)
        move[36, 1] += 1 
        move[45, 1] += 1
        # move 28-31 shorter (and closer to 32-36)
        # move[27:31, 1] = np.ceil((points[33, 1] - points[30, 1]) * 0.2)
        move[29, 1] += round(positive_num(points[30, 1] - points[29, 1]) * 0.1)
        move[28, 1] += round(positive_num(points[30, 1] - points[28, 1]) * 0.1)
        move[27, 1] += np.ceil(positive_num(points[30, 1] - points[27, 1]) * 0.1)
        # move 32-26 wider
        move[34, 0] += 1
        move[35, 0] += np.ceil(positive_num(points[35, 0] - points[33, 0]) * 0.1)
        move[32, 0] += -1
        move[31, 0] += -np.ceil(positive_num(points[33, 0] - points[31, 0]) * 0.1)
        # move 49,61,65,55 lower
        move[48, 1] += np.ceil(positive_num(points[59, 1] - points[49, 1]) * 0.1)
        move[60, 1] += int(positive_num(points[67, 1] - points[61, 1]) * 0.1)
        move[54, 1] += np.ceil(positive_num(points[55, 1] - points[53, 1]) * 0.1)
        move[64, 1] += int(positive_num(points[65, 1] - points[63, 1]) * 0.1)
        
        if 'noonan' in f.strip().split(os.sep)[-1]:
            move = -move
            # print('reverse move:', f)

        new_points = [(p[0], p[1]) for p in (points + move)]
        

        lmk_img = np.ones(img.shape) * img.mean()
        lmk_img = Image.fromarray(lmk_img.astype('uint8'))
        lmk_draw = ImageDraw.Draw(lmk_img)
        lmk_draw.rectangle(rec, outline='black') #'black'
        lmk_draw.point(new_points, fill='white') #'white'
        del lmk_draw
        lmk_img.save(lmk_f)

        img_names.append(lmk_f.split(os.sep)[-1])
        lmks.append(new_points)

    # Note: lmk_dir here is whatever directory the last processed image mapped
    # to, so all names/landmarks end up saved into that one directory.
    np.save(lmk_dir + os.sep + 'img_names.npy', np.array(img_names))
    np.save(lmk_dir + os.sep + 'lmks.npy', np.array(lmks))
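# The positive_num helper used above is not shown in the snippet; a plausible
# definition (an assumption) that clamps landmark offsets to be non-negative:
def positive_num(x):
    return x if x > 0 else 0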
Example #12
def read_input_image(self, img_path):
    # self.input_img is a Numpy array, (h, w, c), in RGB order
    self.input_img = dlib.load_rgb_image(img_path)
Example #13
import math

import cv2
import dlib
import numpy as np
from django.conf import settings  # assumed source of the settings object


def opencv_dface(path):
    img = cv2.imread(path, 1)

    if (type(img) is np.ndarray):
        print(img.shape)

        factor = 1
        if img.shape[1] > 640:
            factor = 640.0 / img.shape[1]
        elif img.shape[0] > 480:
            factor = 480.0 / img.shape[0]

        if factor != 1:
            w = img.shape[1] * factor
            h = img.shape[0] * factor
            img = cv2.resize(img, (int(w), int(h)))

        size = img.shape

        baseUrl = settings.MEDIA_ROOT_URL + settings.MEDIA_URL
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(
            baseUrl + 'shape_predictor_68_face_landmarks.dat')

        dlibimg = dlib.load_rgb_image(path)
        dets = detector(dlibimg, 1)
        for k, d in enumerate(dets):
            # print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            #     k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(dlibimg, d)
            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))

            landmarks = list()
            for i in range(0, 68):
                landmarks.append([
                    int(shape.part(i).x * factor),
                    int(shape.part(i).y * factor)
                ])
                # print(landmarks[i])
                cv2.circle(img, (landmarks[i][0], landmarks[i][1]), 2,
                           (0, 0, 255), -1)

        # Orientation: estimate head pose from 2D-3D point correspondences
        image_points = np.array(
            [
                (landmarks[30][0], landmarks[30][1]),  # Nose tip
                (landmarks[8][0], landmarks[8][1]),  # Chin
                (landmarks[45][0], landmarks[45][1]),  # Left eye left corner
                (landmarks[36][0], landmarks[36][1]),  # Right eye right corner
                (landmarks[54][0], landmarks[54][1]),  # Left Mouth corner
                (landmarks[48][0], landmarks[48][1])  # Right mouth corner
            ],
            dtype="double")

        model_points = np.array([
            (0.0, 0.0, 0.0),  # Nose tip
            (0.0, -330.0, -65.0),  # Chin
            (-225.0, 170.0, -135.0),  # Left eye left corner
            (225.0, 170.0, -135.0),  # Right eye right corner
            (-150.0, -150.0, -125.0),  # Left Mouth corner
            (150.0, -150.0, -125.0)  # Right mouth corner
        ])

        # Camera internals

        center = (size[1] / 2, size[0] / 2)
        focal_length = center[0] / np.tan(60 / 2 * np.pi / 180)
        camera_matrix = np.array([[focal_length, 0, center[0]],
                                  [0, focal_length, center[1]], [0, 0, 1]],
                                 dtype="double")

        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector,
         translation_vector) = cv2.solvePnP(model_points,
                                            image_points,
                                            camera_matrix,
                                            dist_coeffs,
                                            flags=cv2.SOLVEPNP_ITERATIVE)

        axis = np.float32([[500, 0, 0], [0, 500, 0], [0, 0, 500]])

        imgpts, jac = cv2.projectPoints(axis, rotation_vector,
                                        translation_vector, camera_matrix,
                                        dist_coeffs)
        modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector,
                                           translation_vector, camera_matrix,
                                           dist_coeffs)
        rvec_matrix = cv2.Rodrigues(rotation_vector)[0]

        proj_matrix = np.hstack((rvec_matrix, translation_vector))
        eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]

        pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]

        pitch = math.degrees(math.asin(math.sin(pitch)))
        roll = -math.degrees(math.asin(math.sin(roll)))
        yaw = math.degrees(math.asin(math.sin(yaw)))

        rotate_degree = (str(int(roll)), str(int(pitch)), str(int(yaw)))
        nose = (landmarks[30][0], landmarks[30][1])

        cv2.line(img, nose, tuple(imgpts[1].ravel()), (0, 255, 0), 3)  # GREEN
        cv2.line(img, nose, tuple(imgpts[0].ravel()), (255, 0, 0), 3)  # BLUE
        cv2.line(img, nose, tuple(imgpts[2].ravel()), (0, 0, 255), 3)  # RED

        # for j in range(len(rotate_degree)):
        #     cv2.putText(img, ('{:05.2f}').format(float(rotate_degree[j])), (10, 30 + (50 * j)),
        #                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=2, lineType=2)

        if roll == 0:
            roll_comment = str(int(roll))
        elif roll > 0:
            roll_comment = 'Rt tilt ' + str(int(roll))
        elif roll < 0:
            roll_comment = 'Lt tilt ' + str(int(roll * -1))

        if pitch == 0:
            pitch_comment = str(int(pitch))
        elif pitch > 0:
            pitch_comment = 'Down ' + str(int(pitch))
        elif pitch < 0:
            pitch_comment = 'Up ' + str(int(pitch * -1))

        if yaw == 0:
            yaw_comment = str(int(yaw))
        elif yaw > 0:
            yaw_comment = 'Rt yaw ' + str(int(yaw))
        elif yaw < 0:
            yaw_comment = 'Lt yaw ' + str(int(yaw * -1))

        result = np.zeros((img.shape[0] + 300, img.shape[1], 3))
        result[:img.shape[0], :img.shape[1]] = img

        cv2.putText(result,
                    roll_comment, (10, img.shape[0] + (50 * 1)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0),
                    thickness=2,
                    lineType=2)

        cv2.putText(result,
                    pitch_comment, (10, img.shape[0] + (50 * 2)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0),
                    thickness=2,
                    lineType=2)

        cv2.putText(result,
                    yaw_comment, (10, img.shape[0] + (50 * 3)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0),
                    thickness=2,
                    lineType=2)

        for k in range(len(image_points)):
            # print(image_points[k].ravel()[0])
            cv2.circle(img, (int(
                image_points[k].ravel()[0]), int(image_points[k].ravel()[1])),
                       5, (240, 255, 10), -1)

        cv2.imwrite(path, result)

    else:
        print('something went wrong')
        print(path)
Example #14
import glob
import os
import sys

import dlib

faces_folder = sys.argv[1]  # folder of test images (set earlier in the full script)

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("detector.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand, but in general you would have multiple detectors.
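# A sketch of the grouped-detector call; dlib provides
# fhog_object_detector.run_multiple for running several detectors in one pass
# (the image name here is a placeholder):
detector2 = dlib.fhog_object_detector("detector.svm")
detectors = [detector1, detector2]
image = dlib.load_rgb_image(faces_folder + '/some_image.jpg')
boxes, confidences, detector_idxs = dlib.fhog_object_detector.run_multiple(
    detectors, image, upsample_num_times=1, adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(
        detector_idxs[i], boxes[i], confidences[i]))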
Example #15
import glob
import os

import dlib
import numpy as np
from PIL import Image


def face_landmark_detection(filename):

    predictor_path = "shape_predictor_68_face_landmarks.dat"
    faces_folder_path = filename

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    file = open(faces_folder_path + '.txt', 'w+')

    #win = dlib.image_window()
    array=[]
    border=[]
    for f in glob.glob(os.path.join(faces_folder_path)):
        print("Processing file: {}".format(f))
        img = dlib.load_rgb_image(f)

        #win.clear_overlay()
        #win.set_image(img)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            """
            border.append(str(d.left()))
            border.append(str(d.top()))
            border.append(str(d.right()))
            border.append(str(d.bottom()))
            
            file.write(border[0])
            file.write(" ")
            file.write(border[1])
            file.write(" ")

            file.write(border[0])
            file.write(" ")
            file.write(border[3])
            file.write(" ")

            file.write(border[2])
            file.write(" ")
            file.write(border[1])
            file.write(" ")

            file.write(border[2])
            file.write(" ")
            file.write(border[3])
            file.write(" ")
            """
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            #print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
            #                                          shape.part(1)))
            # Draw the face landmarks on the screen.
            #win.add_overlay(shape)
        img = Image.open(filename)
        img = np.array(img)
        width = img.shape[1] - 1
        height = img.shape[0] - 1
        half_width = img.shape[1] // 2
        half_height = img.shape[0] // 2
        border_points = [(0, 0), (width, 0), (0, height), (width, height),
                         (0, half_height), (width, half_height),
                         (half_width, 0), (half_width, height)]

        # Write each landmark as "x y" by stripping the parentheses from the
        # "(x, y)" string form of each point
        for i in range(68):
            array.append(shape.part(i))
            ca = str(array[i])
            c = ca.split(",")
            if i != 0:
                file.write("\n")
            for j in c[0]:
                if j != '(':
                    file.write(j)
            for j in c[1]:
                if j != ')':
                    file.write(j)
        file.write("\n")
        for i in range(len(border_points)):
            file.write(str(border_points[i][0]) + " " + str(border_points[i][1]) + "\n")
        file.close()
Example #16
import glob
import os
import sys

import dlib

predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

win = dlib.image_window()

# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))

    # Now process each face we found.
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
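        # The original dlib example continues here by computing the 128D
        # descriptor for each face, e.g.:
        # shape = sp(img, d)
        # face_descriptor = facerec.compute_face_descriptor(img, shape)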
Example #17
import os

import cv2
import dlib
import numpy as np
from imutils import paths  # assumed source of paths.list_images
from tensorflow.keras.models import load_model  # assumed source of load_model
from tensorflow.keras.preprocessing.image import img_to_array  # assumed source

model_path = './liveness_model.h5'

#load face detector
hog_detector = dlib.get_frontal_face_detector()

print("[INFO] loading trained model liveness detector...")
model = load_model(model_path)
currentFolder = './test_net'
imagePaths = list(paths.list_images(currentFolder))

for imagePath in imagePaths:
    image = dlib.load_rgb_image(imagePath)
    dets = hog_detector(image, 1)
    if len(dets) == 0:
        print('No face detected')
    else:
        for i, d in enumerate(dets):
            face_crop = image[d.top():d.bottom(), d.left():d.right()]
            face_crop = cv2.resize(face_crop, (64, 64))
            face = img_to_array(face_crop)
            face = np.expand_dims(face, axis=0)
            preds = model.predict(face)
            predicted_class_indices = np.argmax(preds, axis=1)
            if predicted_class_indices[0] == 1:
                print('real')
            else:
                print('fake')
Example #18
    def process(self, tup):
        mylogger = loger.getLoger("Whisper", Constants.boltpath + "logs")
        try:
            timenow = Blocker.current_milli_time()
            # We only use the last value of tuple, the other two are fixed in
            # WhisperCache.java. Change this to make other things
            original_image = tup.values[0]
            alias = tup.values[1]
            filepath = tup.values[2]
            # Generic models
            video = DatabaseSession.session.query(Videos).filter(
                Videos.video_path == filepath).first()

            # Some paths

            detector = dlib.get_frontal_face_detector()
            sp = dlib.shape_predictor(
                Constants.predictor_path)  # 128D face descriptor predictor
            facerec = dlib.face_recognition_model_v1(
                Constants.face_rec_model_path)
            descriptors = []
            images = []

            # Now find all the faces and compute 128D face descriptors for each face.
            for f in glob.glob(
                    os.path.join(Constants.faces_folder_path, "*.png")):
                mylogger.info("Processing file: {}".format(f))
                img = dlib.load_rgb_image(f)

                # Ask the detector to find the bounding boxes of each face. The 1 in the
                # second argument indicates that we should upsample the image 1 time. This
                # will make everything bigger and allow us to detect more faces.
                dets = detector(img, 1)
                mylogger.info("Number of faces detected: {}".format(len(dets)))

                # Now process each face we found.
                for k, d in enumerate(dets):
                    # Get the landmarks/parts for the face in box d.
                    shape = sp(img, d)

                    # Compute the 128D vector that describes the face in img identified by
                    # shape.
                    face_descriptor = facerec.compute_face_descriptor(
                        img, shape)
                    descriptors.append(face_descriptor)
                    images.append((img, shape))

            # Now let's cluster the faces.
            labels = dlib.chinese_whispers_clustering(
                descriptors, Constants.TOLERANCE_WHISPER)
            num_classes = len(set(labels))
            mylogger.info("Number of clusters: {}".format(num_classes))
            if num_classes > 0:
                counts = 0
                DatabaseSession.session.query(Wishper).filter(
                    Wishper.video == video).delete()
                DatabaseSession.session.commit()
                for i, label in enumerate(labels):
                    # Ensure output directory exists
                    output_folder_path_real = Constants.output_folder_path + str(
                        label)
                    if not os.path.isdir(output_folder_path_real):
                        os.makedirs(output_folder_path_real)

                    # Save the extracted faces
                    mylogger.info("Saving faces cluster to output folder...")
                    img, shape = images[i]
                    file_path = os.path.join(output_folder_path_real,
                                             "face_" + str(counts))
                    # The size and padding arguments are optional size=300x300 and padding=0.25
                    dlib.save_face_chip(img,
                                        shape,
                                        file_path,
                                        size=300,
                                        padding=0.25)
                    counts = counts + 1
                    whisper = Wishper(alias=file_path,
                                      original_image=file_path,
                                      video=video,
                                      group=str(label),
                                      path=file_path + ".jpg")

                    DatabaseSession.session.add(whisper)
                    DatabaseSession.session.commit()

        except Exception as inst:
            mylogger.error("Python error.")
            mylogger.error(type(inst))
            mylogger.error(inst)
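# For reference, dlib's own face_clustering example calls the same API with a
# 0.5 distance threshold, so Constants.TOLERANCE_WHISPER presumably sits near
# that value:
# labels = dlib.chinese_whispers_clustering(descriptors, 0.5)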
Example #19
    def custom_test(self, testing_samples_dir, age, gender):
        if not self.load_checkpoint():
            print("\tFAILED >_<!")
            exit(0)
        else:
            print("\tSUCCESS ^_^")
        predictor_path = 'shape_predictor_68_face_landmarks.dat'
        detector = dlib.get_frontal_face_detector()
        sp = dlib.shape_predictor(predictor_path)

        num_samples = int(np.sqrt(self.size_batch))
        file_names = glob(testing_samples_dir)
        for i in range(len(file_names)):
            file_name = file_names[i]
            img = dlib.load_rgb_image(file_name)
            dets = detector(img, 1)

            if len(dets) == 1:
                for k, d in enumerate(dets):
                    shape = sp(img, d)
                    dlib.save_face_chip(img,
                                        shape,
                                        "test/result/thumbnail",
                                        size=256,
                                        padding=0.1)

                sample_files = []
                for j in range(num_samples):
                    sample_files.append("test/result/thumbnail.jpg")

                sample = [
                    load_image(
                        image_path=sample_file,
                        image_size=self.size_image,
                        image_value_range=self.image_value_range,
                        is_gray=(self.num_input_channels == 1),
                    ) for sample_file in sample_files
                ]
                if self.num_input_channels == 1:
                    images = np.array(sample).astype(np.float32)[:, :, :, None]
                else:
                    images = np.array(sample).astype(np.float32)
                gender_male = np.ones(
                    shape=(num_samples, 2),
                    dtype=np.float32) * self.image_value_range[0]
                gender_female = np.ones(
                    shape=(num_samples, 2),
                    dtype=np.float32) * self.image_value_range[0]
                for i in range(gender_male.shape[0]):
                    gender_male[i, 0] = self.image_value_range[-1]
                    gender_female[i, 1] = self.image_value_range[-1]

                if 0 <= age <= 5:
                    label = 0
                elif 6 <= age <= 10:
                    label = 1
                elif 11 <= age <= 15:
                    label = 2
                elif 16 <= age <= 20:
                    label = 3
                elif 21 <= age <= 30:
                    label = 4
                elif 31 <= age <= 40:
                    label = 5
                elif 41 <= age <= 50:
                    label = 6
                elif 51 <= age <= 60:
                    label = 7
                elif 61 <= age <= 70:
                    label = 8
                else:
                    label = 9

                if gender == 0:
                    self.test(images, gender_male, 'test.png')
                else:
                    self.test(images, gender_female, 'test.png')

                img = cv2.imread(self.save_dir + '/test/test.png')
                a = 128 * label
                b = 128 * (label + 1)
                img0 = img[a:b, a:b]
                cv2.imwrite('test/result/thumbnail.png', img0)
                try:
                    output = faceSwap('test/result/thumbnail.png', file_name)
                    cv2.imwrite(
                        'test/result/' + str(age) + '_' + str(gender) + '_' +
                        file_name.split('\\')[1], output)

                    input = cv2.imread(file_name)
                    cv2.imshow('input', input)
                    cv2.imshow('output', output)
                    cv2.waitKey(0)

                    print('\n\tDone! Results are saved as %s' %
                          ('test/result/' + str(age) + '_' + str(gender) +
                           '_' + file_name.split('\\')[1]))
                except IndexError as e:
                    print(
                        'The detected face is not usable. Please provide a '
                        'good-quality picture as input.'
                    )
                os.remove('test/result/thumbnail.png')
                os.remove('test/result/thumbnail.jpg')
Example #20
def predict_single(img_path,
                   model_path,
                   image_size=IMAGE_SIZE,
                   depth_multiplier=1.0,
                   predict_fn=pfld_predict_landmarks,
                   zero_mean=True,
                   box_detector='dlib',
                   **kwargs):
    img_size = image_size
    gt_landmark = None
    if box_detector == 'gt':
        points, imgs_sizes, imgs = load_landmarks(
            '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml'
        )
        fn = os.path.basename(img_path)
        gt_landmark = None
        for idx, img in enumerate(imgs):
            if img.endswith(fn):
                gt_landmark = points[idx]
                break
        if gt_landmark is not None:
            min_y, max_y = gt_landmark[:, 1].min(), gt_landmark[:, 1].max()
            min_x, max_x = gt_landmark[:, 0].min(), gt_landmark[:, 0].max()
            box = Rect(min_y, max_y, min_x, max_x)
            # _, gt_landmark = crop_and_resize(, gt_landmark, image_size)
    elif box_detector == 'tf':
        detector = get_face_detector()
        l, t, r, b = detector.detect(img_path)
        box = Rect(t, b, l, r)

    # get face bound
    else:
        img = dlib.load_rgb_image(img_path)
        detector = dlib.get_frontal_face_detector()
        box = detector(img, 1)[0]

    oridata = cv2.imread(img_path)
    # if image_size ==80:
    #     oridata = cv2.cvtColor(oridata,cv2.COLOR_BGR2RGB)
    data = crop(oridata, box)
    data = resize(data, (img_size, img_size),
                  anti_aliasing=True,
                  mode='reflect')
    # view_img(data, None)
    normalized_data = normalize_data(data)
    if model_path.endswith('.tflite'):
        # print('using tflite model ', model_path)
        # is_unint8 = model_path.find('uint8') >= 0
        # if is_unint8:
        #     print('int model')
        #     lmks = predict_tflite((np.reshape(data, (1, *data.shape)) * 255).astype(np.uint8), model_path)[0]
        # else:
        print('float model')
        lmks = predict_tflite(
            np.reshape(normalized_data,
                       (1, *normalized_data.shape)).astype(np.float32),
            model_path)[0]
    else:
        lmks = predict(np.reshape(normalized_data,
                                  (1, *normalized_data.shape)),
                       model_path,
                       predict_fn,
                       image_size=image_size,
                       depth_multiplier=depth_multiplier,
                       **kwargs)[0]
    # print('landmark = ', lmks)
    if zero_mean:
        # landmarks are predicted in [-1, 1]; map them to pixel coordinates
        for i in range(68):
            lmks[i * 2] = (lmks[i * 2] / 2 + 0.5) * image_size
            lmks[i * 2 + 1] = (lmks[i * 2 + 1] / 2 + 0.5) * image_size
    else:
        # landmarks are predicted in [0, 1]
        for i in range(68):
            lmks[i * 2] = lmks[i * 2] * image_size
            lmks[i * 2 + 1] = lmks[i * 2 + 1] * image_size
        # print('landmarks after denorm', lmks)
    lmks = lmks.reshape((68, 2))

    view_img(data, lmks)
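# Usage sketch (hypothetical paths; assumes the helpers referenced above, such
# as predict_tflite and view_img, are importable):
# predict_single('faces/test.jpg', 'models/pfld.tflite', box_detector='dlib')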
Example #21
#!/usr/bin/env python
# coding: utf-8

import dlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('../models/shape_predictor_5_face_landmarks.dat')

img = dlib.load_rgb_image('../imgs/02.jpg')
plt.figure(figsize=(16, 10))
plt.imshow(img)
plt.show()

img_result = img.copy()
dets = detector(img)
if len(dets) == 0:
    print('cannot find faces!')
else:
    fig, ax = plt.subplots(1, figsize=(16, 10))
    for det in dets:
        x, y, w, h = det.left(), det.top(), det.width(), det.height()
        rect = patches.Rectangle((x, y),
                                 w,
                                 h,
                                 linewidth=2,
                                 edgecolor='w',
                                 facecolor='none')
        ax.add_patch(rect)
Example #22
import dlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np

# In[2]:

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('../models/shape_predictor_5_face_landmarks.dat')

# In[3]:

img = dlib.load_rgb_image('../imgs/12.jpg')
plt.figure(figsize=(16, 10))
plt.imshow(img)
plt.show()

# In[4]:

img_result = img.copy()
dets = detector(img)
if len(dets) == 0:
    print('cannot find faces!')
else:
    fig, ax = plt.subplots(1, figsize=(16, 10))
    # draw a rectangular box around the face region
    for det in dets:
        x, y, w, h = det.left(), det.top(), det.width(), det.height()
Example #23
import cv2
import dlib
import numpy as np
import torch

# crop_cvimg is a project helper defined elsewhere.


def process_imgs(list_path):
    with open(list_path) as f:
        img_paths = f.read().splitlines()

    proxyimg_path = img_paths[0]
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        'data/shape_predictor_68_face_landmarks.dat')

    cur_id = 1
    proxyimg = dlib.load_rgb_image(proxyimg_path)
    dets, scores, idx = detector.run(proxyimg)
    score = -1
    best_id = -1
    for i in range(len(dets)):
        if scores[i] > score:
            score = scores[i]
            best_id = i
    if best_id == -1:
        return {'success': 0}
    shape = predictor(proxyimg, dets[best_id])
    lands = np.zeros((68, 2), np.float32)
    for i in range(68):
        lands[i, 0] = shape.part(i).x
        lands[i, 1] = shape.part(i).y
    lms_x = lands[:, 0]
    lms_y = lands[:, 1]
    x_min = np.amin(lms_x)
    x_max = np.amax(lms_x)
    x_center = (x_max + x_min) / 2
    y_top = 2 * lms_y[19] - lms_y[29]
    y_bot = lms_y[8]
    y_len = y_bot - y_top
    y_top = y_top - 0.1 * y_len
    y_center = (y_top + y_bot) / 2
    crop_width = (x_max - x_min) * 1.1
    crop_height = (y_bot - y_center) * 2 * 1.1
    crop_width_34 = max(crop_width, crop_height * 3 / 4)

    center_x = int(x_center)
    center_y = int(y_center)
    crop_width = int(crop_width)
    crop_width = crop_width + (-crop_width) % 3
    crop_height = int(crop_width / 3 * 4)

    w_size_proxy = int(150)
    h_size_proxy = int(200)
    w_size = int(600)
    h_size = int(800)

    cam = torch.tensor((535 * 4, w_size / 2, h_size / 2), dtype=torch.float)

    imgs = []
    for i in range(len(img_paths)):
        cvimg = cv2.imread(img_paths[i], cv2.IMREAD_UNCHANGED)
        img = crop_cvimg(cvimg, center_x, center_y, crop_width, crop_height,
                         w_size, h_size)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.transpose(img, (2, 0, 1))
        if i == 0:
            img_proxy = crop_cvimg(cvimg, center_x, center_y, crop_width,
                                   crop_height, w_size_proxy, h_size_proxy)
            img_proxy = cv2.cvtColor(img_proxy, cv2.COLOR_BGR2RGB)
            img_proxy = np.transpose(img_proxy, (2, 0, 1))
        imgs.append(img)

    img = np.concatenate(imgs, 0)
    img_tensor = torch.as_tensor(img).view(-1, 3, h_size, w_size)
    img_proxy_tensor = torch.as_tensor(img_proxy).unsqueeze(0)
    cam = cam.unsqueeze(0)
    return {
        'success': 1,
        'img': img_tensor,
        'img_proxy': img_proxy_tensor,
        'cam': cam
    }
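# Usage sketch (hypothetical list file containing one image path per line):
# batch = process_imgs('data/img_list.txt')
# if batch['success']:
#     img_tensor, img_proxy, cam = batch['img'], batch['img_proxy'], batch['cam']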
Example #24
# Offline version: face detection and facial landmark localization
import sys
import dlib

# IMPORTANT!!!: adjust these paths to your own setup before running
predictor_path = './model/shape_predictor_68_face_landmarks.dat'  # 68-point model file
face_file_path = './data/image/faces/bald_guys.jpg'  # path to the face photo

# 人脸检测器
detector = dlib.get_frontal_face_detector()
# 特征点检测器
sp = dlib.shape_predictor(predictor_path)

# 导入待处理图片
img = dlib.load_rgb_image(face_file_path)

# 人脸检测
dets = detector(img, 1)
# 人脸总数
num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# find the coordinates of the 68 landmark points
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

window = dlib.image_window()

# Align and display each detected face.
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
    window.set_image(image)
    dlib.hit_enter_to_continue()
예제 #25
0
import os

import dlib
import numpy as np
import PIL.Image
import scipy.ndimage

# args, ns, detector, sp, output_size, transform_size and enable_padding
# are module-level globals set up by the surrounding script.
def process_image(lock, img_file):

    lock.acquire()
    cnt = ns.cnt
    ns.cnt += 1
    lock.release()

    output_img = os.path.join(args.output_dir, f"{cnt:08}.png")
    #if os.path.isfile(output_img):
    #    continue
    img = dlib.load_rgb_image(img_file)
    dets = detector(img, 1)
    if len(dets) <= 0:
        print("no face landmark detected")
        return cnt
    else:
        shape = sp(img, dets[0])
        points = np.empty([68, 2], dtype=int)
        for b in range(68):
            points[b, 0] = shape.part(b).x
            points[b, 1] = shape.part(b).y
        lm = points
    # lm = fa.get_landmarks(input_img)[-1]
    # lm = np.array(item['in_the_wild']['face_landmarks'])
    lm_chin = lm[0:17]  # left-right
    lm_eyebrow_left = lm[17:22]  # left-right
    lm_eyebrow_right = lm[22:27]  # left-right
    lm_nose = lm[27:31]  # top-down
    lm_nostrils = lm[31:36]  # top-down
    lm_eye_left = lm[36:42]  # left-clockwise
    lm_eye_right = lm[42:48]  # left-clockwise
    lm_mouth_outer = lm[48:60]  # left-clockwise
    lm_mouth_inner = lm[60:68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
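    # x spans half the crop width: it mixes the eye-to-eye and eye-to-mouth
    # vectors so the rectangle rotates with the face; qsize below is the side
    # length of the resulting square.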
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    img = PIL.Image.open(img_file)
    img = img.convert('RGB')

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)),
                 int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
            int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0),
            min(crop[2] + border,
                img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
           int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border,
               0), max(-pad[1] + border,
                       0), max(pad[2] - img.size[0] + border,
                               0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
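        # Reflection-pad, then feather the padded border: blend it toward a
        # Gaussian-blurred copy and toward the median color so the seam is
        # invisible in the aligned output.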
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img),
                     ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(
            1.0 -
            np.minimum(np.float32(x) / pad[0],
                       np.float32(w - 1 - x) / pad[2]), 1.0 -
            np.minimum(np.float32(y) / pad[1],
                       np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) -
                img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)),
                                  'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD,
                        (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    # Save aligned image.
    img.save(output_img)
    return cnt
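# A minimal driver sketch (not from the original source) for process_image:
# it assumes this sits at the bottom of the same script, on a platform with
# the fork start method so workers inherit the module-level detector, sp,
# args, output_size, transform_size and enable_padding globals.
import glob
import multiprocessing as mp
from functools import partial

if __name__ == '__main__':
    manager = mp.Manager()
    ns = manager.Namespace()   # shared namespace read by process_image
    ns.cnt = 0                 # running output-file counter
    lock = manager.Lock()      # serializes the counter increment

    img_files = sorted(glob.glob('raw_images/*.jpg'))  # hypothetical inputs
    with mp.Pool(processes=4) as pool:
        pool.map(partial(process_image, lock), img_files)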
예제 #26
0
import glob
import os

import dlib

# faces_folder is set from the command line earlier in the full example.
# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("detector.svm")
# In this example we load detector.svm again since it's the only one we have
# on hand, but in your case you would have multiple detectors.
detector2 = dlib.fhog_object_detector("detector.svm")
# Make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = dlib.load_rgb_image(faces_folder + '/2008_002506.jpg')
[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run_multiple(
    detectors, image, upsample_num_times=1, adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(
        detector_idxs[i], boxes[i], confidences[i]))
예제 #27
0
# This snippet assumes the face_detection package plus module-level
# frames_path and save_path variables from the surrounding script.
fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D)

print('Reading video frames...')

numImg = len([
    name for name in os.listdir(frames_path)
    if os.path.isfile(os.path.join(frames_path, name))
])
from_first = True

with open(save_path, 'w+') as out:
    for f in range(1, numImg + 1):
        number = '{0:04d}'.format(f)
        filename = os.path.join(frames_path, "frames" + number + ".jpg")
        img = dlib.load_rgb_image(filename)
        dets = fa.get_detections_for_image(img)

        #dets = cnn_face_detector(img, 1)
        sortedDets = sorted(dets, key=lambda a: a[-1], reverse=True)
        if len(dets) == 0:
            print('No faces detected. Using last detection result.')
            if from_first:
                print(
                    'Not detected on first frame, second frame will be used twice'
                )
                continue
        else:
            d = sortedDets[0]
        if from_first:
            out.write('%d, %d, %d, %d\n' % (d[0], d[1], d[2], d[3]))
예제 #28
0
        "execute this program by running:\n"
        "    ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
    exit()

predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
예제 #29
0
import dlib
import cv2 as cv
#cnn_face_detector = dlib.cnn_face_detection_model_v1("./models/dlib/mmod_human_face_detector.dat")
cnn_face_detector = dlib.get_frontal_face_detector()  # HOG frontal detector (the CNN variant is commented out above)
sp = dlib.shape_predictor("./models/dlib/shape_predictor_5_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1(
    "./models/dlib/dlib_face_recognition_resnet_model_v1.dat")
win = dlib.image_window()
img = dlib.load_rgb_image("11.png")
win.clear_overlay()
win.set_image(img)
dets = cnn_face_detector(img, 1)
for k, d in enumerate(dets):
    shape = sp(img, d)
    win.clear_overlay()
    win.add_overlay(d)
    win.add_overlay(shape)
    face_descriptor = facerec.compute_face_descriptor(img, shape)
    print(face_descriptor)
    # Optionally, compute the descriptor from a pre-aligned face chip:
    # face_chip = dlib.get_face_chip(img, shape)
    # face_descriptor_from_prealigned_image = facerec.compute_face_descriptor(face_chip)
    # print(face_descriptor_from_prealigned_image)
    dlib.hit_enter_to_continue()
예제 #30
0
#
#   This utility generates the test data required for the tests contained in test_numpy_returns.py
#
#   Also note that this utility requires Numpy which can be installed
#   via the command:
#       pip install numpy
import sys
import dlib
import numpy as np
import utils

if len(sys.argv) != 2:
    print(
        "Call this program like this:\n"
        "   ./generate_numpy_returns_test_data.py shape_predictor_5_face_landmarks.dat\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
    exit()


detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(sys.argv[1])

img = dlib.load_rgb_image("../../../examples/faces/Tom_Cruise_avp_2014_4.jpg")
dets = detector(img)
shape = predictor(img, dets[0])

utils.save_pickled_compatible(shape, "shape.pkl")

face_chip = dlib.get_face_chip(img, shape)
np.save("test_face_chip", face_chip)
예제 #31
0
    #win_det = dlib.image_window()
    #win_det.set_image(detector)
    
    print("Showing detections on the images in the plates folder...")
    win = dlib.image_window()
    global lendiff, hdiff
    #wins=[]
    file_list = []
    if plate_folder.find(".jpg", -4) > -1:
        file_list.append(plate_folder)
    else:
        file_list = glob.glob(os.path.join(plate_folder, "*.jpg"))

    for f in file_list:
        print("Processing file: {}".format(f))
        img = dlib.load_rgb_image(f)
        #print (img)
        #p_image=Image.fromarray(img,'RGB')
        #rects = []
        #dlib.find_candidate_object_locations(img, rects, min_size=100)
        #p_image=p_image.convert('L',dither=Image.NONE)
        #p_image=p_image.filter(ImageFilter.GaussianBlur())
        #p_image=p_image.filter(ImageFilter.CONTOUR)    
        #p_image=p_image.convert('1')#,dither=Image.NONE)
        #p_image.show()
        #print(np.asarray(p_image,dtype="uint8"))
        #dets = detector(np.asarray( p_image,dtype="uint8"))
        dets = detector(img)
        print("Number of plate detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
예제 #32
0
#       sudo apt-get install cmake
#
#   Also note that this example requires Numpy which can be installed
#   via the command:
#       pip install numpy

import sys

import dlib

detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time.  This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

예제 #33
0
from math import ceil

import dlib
from pandas import DataFrame
from PIL import Image, ImageDraw
from sklearn.cluster import KMeans

def splitcharacter(imagein, find_plate):
    img_arr2=dlib.as_grayscale(imagein)
    img_arr2=dlib.threshold_image(img_arr2)
    #print(type(imagein))
    #plate_str=find_plate.get_platestr_from_image(img_arr2)
    #print(plate_str)
    #img1=Image.fromarray(img_arr2,'L')
    '''rects = []
    dlib.find_candidate_object_locations(img_arr2, rects, min_size=50)
    print(len(rects))
    mywin = dlib.image_window()
    for rect in rects:
        #img2=img1
        #imagefinal=ImageDraw.Draw(img2)
        #imagefinal.rectangle(((rect.left(),rect.top()),(rect.right(),rect.bottom())),outline="black")
        #img2.show()
        mywin.clear_overlay()
        mywin.set_image(img_arr2)
        mywin.add_overlay(rect)
        #dlib.hit_enter_to_continue()
        time.sleep(0.1)
        #print(rect)
        '''
    #--------------------------------------------
    xmargin=ceil(float(lendiff)/8)
    ymargin=ceil(float(hdiff*9)/2)
    img=255-img_arr2
    h = img.shape[0]
    w = img.shape[1]

    print(h,w,"<==")
    #find blank columns:
    white_num = []
    white_max = 0
    for i in range(w):
        white = 0
        for j in range(h):
            #print(img[j,i])
            if img[j,i] <127:
                white += 1
        white_num.append(white)
        white_max = max(white_max, white)
    blank = []
    print("whitre_max=%d,%f"%(white_max,0.895*white_max))
    for i in range(w):
        #print(white_num[i])
        if (white_num[i]  > 0.895 * white_max):
            blank.append(True)
            #print('*')
            #time.sleep(1)
        else:
            blank.append(False)

    #split index:
    i = 0
    num = 0
    l = 0
    x,y,d = [],[],[]
    while (i < w):
        if blank[i]:
            i += 1
        else:
            j = i
            while (j<w)and((not blank[j])or(j-i<10)):
                j += 1
            x.append(i)
            y.append(j)
            d.append(j-i)
            l += 1
            i = j
    print("len=%d"%l)
    failbox=[]
    whitesum=0

    avgdiff2=round(w/8)
    avgdiff=0#avgdiff2
    sumavg=1
    #for k in range(l):
    #    print(x[k],y[k],d[k],avgdiff,sumavg)
    #    avgdiff=(avgdiff+d[k])/2
    #avgdiff2=avgdiff
    #avgdiff=0
    #print(avgdiff2)
    for k in range(l):
        print(x[k],y[k],d[k],avgdiff,d[k]/avgdiff2)
        if d[k]/avgdiff2 < 1:
            avgdiff=(avgdiff+d[k]*1.0)/2
        #if ((d[k]*1.0)/avgdiff2)<1.8: #xmargin):
        #    avgdiff=avgdiff+d[k]
        #    avgdiff2=d[k]
        #    sumavg=sumavg+1
    #avgdiff=round(avgdiff/sumavg) +2*xmargin
    avgdiff=avgdiff + xmargin
    if l <= 4:
        avgdiff = round((w - 10 * xmargin) / 8)
    """
    for k in range(l):
        if k==0 :
            print(x[k],y[k],d[k])
        else:
            print(x[k],y[k],d[k],round(d[k]/(avgdiff/(k+1))))
        if k==0 or round(d[k]/(avgdiff/(k+1)))>1:
            avgdiff = avgdiff + d[k]
    avgdiff=round((avgdiff)/l)-xmargin
    """
    print("*(%d)*"%avgdiff)
    if l< 8:
        k=0
        while k<l:
            print(k,d[k]/avgdiff,x[k],y[k],d[k])
            if (d[k]*1.0)/avgdiff>1.80:
                dn=d[k]-avgdiff
                d[k]=avgdiff
                yn=y[k]
                if k==6:
                    x[k]=x[k]+xmargin
                    xn=x[k]+avgdiff+xmargin*1
                    y[k]=x[k]+avgdiff
                elif k==2:
                    d[k]=avgdiff*2
                    y[k]=x[k]+2*avgdiff+xmargin
                    xn=x[k]+avgdiff*2+xmargin*2
                else:
                    xn=x[k]+avgdiff+xmargin
                    y[k]=x[k]+avgdiff
                #k=k+1
                if yn<= xn :
                    k=k+1
                    continue
                x.insert(k+1,xn)
                y.insert(k+1,yn)
                d.insert(k+1,dn)
                print(k,x[k],y[k],d[k])
                print(xn,yn,dn)
                l=l+1
                if l==8:
                    break
            k=k+1

    for k in range(l):
        for i in range(int(x[k]),int(y[k])):
            whitesum += white_num[i]
        failbox.append((100*whitesum)/(h*(int(y[k])-int(x[k]))))
        #if ((100*whitesum)/(h*(y[k]-x[k]))) < 20 :
        #    failbox.append(True)
        #else:
        #    failbox.append(False)
        whitesum=0


    for k in range(l):
        if round((d[k]*1.0)/avgdiff)>1 and l>=8:
            if k==0:
                x[k]=x[k]+avgdiff
            elif k==l-1:
                y[k]=y[k]-avgdiff
                if y[k]-x[k]< avgdiff :
                    y[k]=x[k]+avgdiff+xmargin
        if round((d[k]*1.0)/avgdiff)<1 :
            failbox[k]=2
        print(x[k],y[k],round((d[k]*1.0)/avgdiff))
        #if y[k]-x[k]< avgdiff :
        #    y[k]=x[k]+avgdiff
    print(failbox)
    print("<===============>")
    realidx=0
    while l > 8:
        for k in range(len(failbox)):
            if failbox[k] < 33:
                del x[realidx]
                del y[realidx]
                del d[realidx]
                failbox[k]=-1
                l= l-1
            else:
                realidx+=1
        k=0
        lk=len(failbox)
        while k<lk:
            if failbox[k]==-1:
                del failbox[k]
                lk = lk-1
            else:
                k = k+1
        print(failbox)
        for ifl in range(8,l):
            doval = min(failbox)
            doval_idx= failbox.index(doval)  
            del x[doval_idx],y[doval_idx],d[doval_idx],failbox[doval_idx]
            l = l-1

    realidx=0
    for k in range(len(failbox)):
        if k >= len(failbox) :
            break
        
        if failbox[k] < 20:
            del x[realidx]
            del y[realidx]
            del d[realidx]
            del failbox[k]
            l= l-1
        else:
            realidx+=1
 
    if l< 8:
        k=0
        while k<l:
            print(k,d[k]/avgdiff,x[k],y[k],d[k])
            if (d[k]*1.0)/avgdiff>1.80:
                dn=d[k]-avgdiff
                d[k]=avgdiff
                yn=y[k]
                if k==6:
                    x[k]=x[k]+xmargin
                    xn=x[k]+avgdiff+xmargin*1
                    y[k]=x[k]+avgdiff
                elif k==2:
                    d[k]=avgdiff*2
                    y[k]=x[k]+2*avgdiff+xmargin
                    xn=x[k]+avgdiff*2+xmargin*2
                else:
                    xn=x[k]+avgdiff+xmargin
                    y[k]=x[k]+avgdiff
                #k=k+1
                if yn<= xn :
                    k=k+1
                    continue
                x.insert(k+1,xn)
                y.insert(k+1,yn)
                d.insert(k+1,dn)
                print(k,x[k],y[k],d[k])
                print(xn,yn,dn)
                l=l+1
                if l==8:
                    break
            k=k+1
    print("--------%d---------------------------------"%l)
    #--------------------------------------------
    #print (type(img_arr2))
    #print(" ")
    #print(img_arr2)
    #img_arr2=sobel(img_arr2)
    img1=Image.fromarray(img_arr2)
    #img1=img1.filter(ImageFilter.CONTOUR)
    #img1=img1.convert('1')
    #img1.show()
    img1.save('tmp.jpg')
    img2=dlib.load_rgb_image('tmp.jpg')
    ximg,yimg=img1.size
    print(img1.size,img2.shape)
    img2=img2[2*int(yimg/4):3*int(yimg/4),0:ximg]
    #img2=np.asarray(img1,dtype='int32')
    #img22=Image.fromarray(img2)
    #img22.show()
    img_arr=dlib.as_grayscale(img2)
    img_arr=dlib.threshold_image(img_arr)
    #print (type(img2))
    #print(" ")
    #print(img_arr)
    Data= {'x':[],'y':[]}
    for y2 in range(len(img_arr)):
        for x2 in range(len(img_arr[0])):
            if img_arr[y2][x2]<128:
                Data['x'].append(x2)
                Data['y'].append(y2)

    df = DataFrame(Data,columns=['x','y'])
    cluster=10
    try:
        kmeans = KMeans(n_clusters=cluster).fit(df)
    except Exception as e:
        return None
    centroids=kmeans.cluster_centers_

    #print(len(centroids),centroids)
    centroids=sorted(centroids,key = lambda x2: x2[0])
    #centroids.reverse();
    print(centroids)
    imageofnumber=[]
    print ("==>",xmargin,ymargin)
    imagefinal=ImageDraw.Draw(img1)
    #for point in centroids:
        #imagefinal.ellipse((point[0]-int(avgdiff/2),point[1]+int(yimg/3)-2,point[0]+int(avgdiff/2),point[1]+int(yimg/3)+2),fill=55)
        #imagefinal.rectangle(((point[0]-int(avgdiff/2),0),(point[0]+int(avgdiff/2),yimg-3)),outline="blue")
        #imagefinal.rectangle(((point[0]-xmargin,point[1]-ymargin),(point[0]+xmargin,point[1]+ymargin)),outline="black")
    for k in range(l):
        imagefinal.rectangle(((x[k],1),(y[k],yimg-1)),outline="green")
    #xwalk=int(ximg-5)/10
    #ywalk=int(yimg-4)
    #xx=xwalk
    
    #while( xx < ximg-xwalk ):
    #    imagefinal.rectangle(((xx,2),(xx+xwalk,2+ywalk)),outline="black")
    #    xx=xx+xwalk
    img1.show() 
    firstx=0
    xstep=0
    """
    for point in centroids:
        if firstx==0:
            firstx=point[0]-int(avgdiff/2)
        else:
            #print(int(firstx-xmargin),int(0),int(point[0]+xmargin),int(yimg))
            print(int(firstx-avgdiff/2),int(0),int(point[0]+avgdiff/2),int(yimg))
            p11=int(0)#point[1]-ymargin)
            p12=int(yimg)#point[1]+ymargin)
            #p21=int(firstx-xmargin)#point[0]-xmargin)
            p21=int(firstx-avgdiff/2)#point[0]-xmargin)
            if xstep==0:
                xstep=point[0]-firstx
            p22=int(point[0]+avgdiff/2)
            if p11 < 0 :
                p11=0
            if p21 < 0:
                p21=0
            firstx=point[0]-int(avgdiff/2)
            imageofnumber.append(imagein[p11:p12,p21:p22])

    """ 
    for i in range(l):
        print(x[i],y[i])
        if x[i]<3 :
            x[i]=3
        if y[i]>w-2 :
            y[i]=w-2
        
        imageofnumber.append(imagein[0:int(yimg),int(x[i]-2):int(y[i])+2])
    return imageofnumber
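# A hedged usage sketch (not from the original source): plate.jpg is a
# hypothetical pre-cropped plate image, the module-level lendiff and hdiff
# margins must already be set by the detection code above, and find_plate is
# only used by the commented-out OCR path, so None is passed here.
plate_img = dlib.load_rgb_image('plate.jpg')
chars = splitcharacter(plate_img, None)
if chars is None:
    print('clustering failed')
else:
    for i, ch in enumerate(chars):
        print('character %d: %d x %d' % (i, ch.shape[1], ch.shape[0]))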
예제 #34
0
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
#   You can install dlib using the command:
#       pip install dlib
#
#   Alternatively, if you want to compile dlib yourself then go into the dlib
#   root folder and run:
#       python setup.py install
#
#   Compiling dlib should work on any operating system so long as you have
#   CMake installed.  On Ubuntu, this can be done easily by running the
#   command:
#       sudo apt-get install cmake
#
#   Also note that this example requires Numpy which can be installed
#   via the command:
#       pip install numpy 

import dlib

image_file = '../examples/faces/2009_004587.jpg'
img = dlib.load_rgb_image(image_file)

# Locations of candidate objects will be saved into rects
rects = []
dlib.find_candidate_object_locations(img, rects, min_size=500)

print("number of rectangles found {}".format(len(rects))) 
for k, d in enumerate(rects):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
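# find_candidate_object_locations also exposes the selective-search knobs;
# a sketch with the full signature (these values are dlib's defaults):
rects2 = []
dlib.find_candidate_object_locations(img,
                                     rects2,
                                     kvals=(50, 200, 3),
                                     min_size=20,
                                     max_merging_iterations=50)
print("number of rectangles found {}".format(len(rects2)))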
예제 #35
0
training_xml_path = os.path.join(training_xml)

dlib.train_simple_object_detector(training_xml_path, svm_file, options)

print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(dlib.test_simple_object_detector(training_xml_path, svm_file)))

detector = dlib.simple_object_detector(svm_file)
max_detections = 0
images_tested = 0
win_det = dlib.image_window()
for filename in os.listdir(image_folder):
    if max_detections > 10:
        print(max_detections)

    if ".jpg" in filename or ".png" in filename:
        img_file = image_folder + "/" + filename
        img = dlib.load_rgb_image(img_file)
        dets = detector(img)
        if len(dets) > 0:
            win_det.clear_overlay()
            win_det.set_image(img)
            for d in dets:
                win_det.add_overlay(d)
            max_detections = max_detections + 1
            input("Press Enter to continue...")
        images_tested = images_tested + 1

print("Images Test: %d  Matches: %d" % ( images_tested, max_detections))
예제 #36
0
def get_test_image_and_shape():
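    # image_path and shape_path are module-level constants defined elsewhere
    # in the test module.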
    img = dlib.load_rgb_image(image_path)
    shape = utils.load_pickled_compatible(shape_path)
    return img, shape
예제 #37
0
import dlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np

# In[48]:

# dlib handles the image-related work
# the detector finds the faces in the image
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('../models/shape_predictor_5_face_landmarks.dat')

# In[49]:

img = dlib.load_rgb_image('../imgs/12.jpg')
plt.figure(figsize=(16, 10))
plt.imshow(img)
plt.show()

# In[50]:

img_result = img.copy()
dets = detector(img, 1)
# hand the detector an image and it finds the faces
if len(dets) == 0:  # if there is no face in the photo
    print('cannot find faces!')
else:  # if there are faces, draw a rectangle around each
    fig, ax = plt.subplots(1, figsize=(16, 10))
    for det in dets:
        x, y, w, h = det.left(), det.top(), det.width(), det.height()