Code Example #1
import time
import cv2
import numpy as np
from lib.core.api.facer import FaceAna

# NOTE: `args` is the module-level argparse namespace from the original demo script.


def video(video_path_or_cam):
    facer = FaceAna()
    vide_capture = cv2.VideoCapture(video_path_or_cam)

    while 1:

        ret, image = vide_capture.read()
        if not ret:  # stop when the camera stream or video file runs out of frames
            break

        pattern = np.zeros_like(image)

        img_show = image.copy()

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        start = time.time()
        boxes, landmarks, states = facer.run(image)

        duration = time.time() - start
        print('one image cost %f s' % (duration))

        for face_index in range(landmarks.shape[0]):

            ####### head pose: needs development
            # reprojectdst, euler_angle=get_head_pose(landmarks[face_index],img_show)

            if args.mask:
                face_bbox_keypoints = np.concatenate(
                    (landmarks[face_index][:17, :],
                     np.flip(landmarks[face_index][17:27, :], axis=0)),
                    axis=0)

                # np.int was removed from recent NumPy, so cast explicitly to int32
                pattern = cv2.fillPoly(pattern,
                                       [face_bbox_keypoints.astype(np.int32)],
                                       (1., 1., 1.))

            # for start, end in line_pairs:
            #     cv2.line(img_show, reprojectdst[start], reprojectdst[end], (0, 0, 255),2)
            #
            # cv2.putText(img_show, "X: " + "{:7.2f}".format(euler_angle[0, 0]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)
            # cv2.putText(img_show, "Y: " + "{:7.2f}".format(euler_angle[1, 0]), (20, 50), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)
            # cv2.putText(img_show, "Z: " + "{:7.2f}".format(euler_angle[2, 0]), (20, 80), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)

            for landmarks_index in range(landmarks[face_index].shape[0]):
                x_y = landmarks[face_index][landmarks_index]
                cv2.circle(img_show, (int(x_y[0]), int(x_y[1])), 3,
                           (222, 222, 222), -1)

        cv2.namedWindow("capture", 0)
        cv2.imshow("capture", img_show)

        if args.mask:
            cv2.namedWindow("masked", 0)
            cv2.imshow("masked", image * pattern)

        key = cv2.waitKey(1)
        if key == ord('q'):
            return
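A minimal driver sketch for the snippet above, assuming the original demo defines `args` at module level via argparse (the --video and --mask flag names are assumptions that merely match the args.mask usage in the loop):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--video', default=0, help='video file path or camera index')
    parser.add_argument('--mask', action='store_true', help='also show the masked face region')
    args = parser.parse_args()
    video(args.video)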
Code Example #2
import os
import time
import cv2
import numpy as np
from lib.core.api.facer import FaceAna

# NOTE: `args` is the module-level argparse namespace from the original demo script.


def images(image_dir):
    facer = FaceAna()

    image_list = os.listdir(image_dir)
    image_list = [x for x in image_list if 'jpg' in x or 'png' in x]
    image_list.sort()

    for idx, image_name in enumerate(image_list):

        image = cv2.imread(os.path.join(image_dir, image_name))

        pattern = np.zeros_like(image)

        img_show = image.copy()

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        start = time.time()
        boxes, landmarks, states = facer.run(image)

        ### reset so no tracking state carries over between independent images
        facer.reset()

        duration = time.time() - start
        print('%d %s time: %fs' % (idx, image_name, duration))

        for face_index in range(landmarks.shape[0]):

            #######head pose
            #reprojectdst, euler_angle=get_head_pose(landmarks[face_index],img_show)

            if args.mask:
                face_bbox_keypoints = np.concatenate(
                    (landmarks[face_index][:17, :],
                     np.flip(landmarks[face_index][17:27, :], axis=0)),
                    axis=0)

                pattern = cv2.fillPoly(pattern,
                                       [face_bbox_keypoints.astype(np.int32)],
                                       (1., 1., 1.))

            # for start, end in line_pairs:
            #     cv2.line(img_show, reprojectdst[start], reprojectdst[end], (0, 0, 255),2)
            #
            # cv2.putText(img_show, "X: " + "{:7.2f}".format(euler_angle[0, 0]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)
            # cv2.putText(img_show, "Y: " + "{:7.2f}".format(euler_angle[1, 0]), (20, 50), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)
            # cv2.putText(img_show, "Z: " + "{:7.2f}".format(euler_angle[2, 0]), (20, 80), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.75, (0, 0, 0), thickness=2)

            for landmarks_index in range(landmarks[face_index].shape[0]):

                x_y = landmarks[face_index][landmarks_index]
                cv2.circle(img_show, (int(x_y[0]), int(x_y[1])), 3,
                           (222, 222, 222), -1)

            cv2.imwrite("results/" + image_name, img_show)
Code Example #3
import cv2
import time
import imageio
import numpy as np

from lib.core.api.facer import FaceAna

from lib.core.headpose.pose import get_head_pose, line_pairs

facer = FaceAna()


def video(video_path_or_cam):
    start_holder = False
    vide_capture = cv2.VideoCapture(video_path_or_cam)
    buff = []
    counter = 1
    while 1:
        counter += 1

        ret, image = vide_capture.read()
        #image = image[100:1000, :, :]
        image = cv2.resize(image, None, fx=2., fy=2.)
        pattern = np.zeros_like(image)

        img_show = image.copy()
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        star = time.time()
        boxes, landmarks, states = facer.run(img)
Code Example #4
import wear_mask
from PIL import Image
import numpy as np
import cv2
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from lib.core.api.facer import FaceAna

# image = cv2.imread('test_img/01.jpg')
image = cv2.imread('test_img/Aishwarya_Rai_0001.png')
facer = FaceAna()
boxes, landmarks, _ = facer.run(image)
face_image = wear_mask.mask_img('test_img/Aishwarya_Rai_0001.png', 'images',
                                landmarks)
# cv2.imshow(face_image)
# face_image = np.array(face_image)
face_image.show()
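If an OpenCV window is preferred over PIL's default viewer, the result can be converted back, as the commented-out lines hint; a sketch assuming face_image is a PIL Image in RGB order:

frame = cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR)  # PIL RGB -> OpenCV BGR
cv2.imshow('masked face', frame)
cv2.waitKey(0)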
Code Example #5
from lib.core.api.facer import FaceAna
import numpy as np
import cv2
import time


def to_np_array(image_data, dtype=np.uint8):
    width = image_data.width
    height = image_data.height
    pixel_length = image_data.pixel_length
    data = image_data.data
    assert len(data) == width * height * pixel_length
    return np.frombuffer(data, dtype=dtype).reshape(
        (height, width, pixel_length))


facer = FaceAna()
# warm-up call: run once up front to hide the model initialization time
boxes, landmarks, states = facer.run(
    cv2.imread(r"D:\pycharmProject\Peppa_Pig_Face_Engine\img\test_32.jpg"))


# `service` is the gRPC-generated module assumed to be imported elsewhere in the original script
class GrpcServer(service.FaceServerServicer):
    def predict(self, request, context):
        start = time.time()
        # print(request.frame)
        image = to_np_array(request.img)
        # print(image.shape)
        # cv2.imwrite("D:\pycharmProject\Peppa_Pig_Face_Engine\img\\test_%d.jpg" % (request.frame), image)
        # print(request.frame, 'reshape cost %f s' % (time.time() - start))
        boxes, landmarks, states = facer.run(image)
        print(request.frame, 'detect cost %f s' % (time.time() - start))
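A self-contained round-trip check for to_np_array; the SimpleNamespace stands in for the request's img message, which is assumed to carry width, height, pixel_length and raw bytes in data:

from types import SimpleNamespace

frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
msg = SimpleNamespace(width=640, height=480, pixel_length=3, data=frame.tobytes())
assert np.array_equal(to_np_array(msg), frame)  # frombuffer restores the original H x W x C layout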
Code Example #6
def __init__(self):
    self.facer = FaceAna()
    self.frame = 0
    self.inc = 1
Code Example #7
import time
import cv2
import numpy as np
from datetime import datetime
from threading import Timer
from lib.core.api.facer import FaceAna
from lib.core.headpose.pose import get_head_pose, line_pairs

# NOTE: `args` is the module-level argparse namespace from the original demo script.


class Test:
    def __init__(self):
        self.facer = FaceAna()
        self.frame = 0
        self.inc = 1

    # callback scheduled periodically via Timer; reports and resets the frame counter
    def printTime(self):
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.frame)
        self.frame = 0
        t = Timer(self.inc, self.printTime, ())
        t.start()

    def video(self, video_path_or_cam):
        vide_capture = cv2.VideoCapture(video_path_or_cam)

        # vide_capture.set(3, 640)
        # vide_capture.set(4, 480)

        while 1:

            ret, image = vide_capture.read()
            self.frame += 1
            pattern = np.zeros_like(image)

            img_show = image.copy()

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            star = time.time()
            boxes, landmarks, states = self.facer.run(image)

            duration = time.time() - star
            # print('one image cost %f s' % (duration))

            for face_index in range(landmarks.shape[0]):

                #######head pose
                reprojectdst, euler_angle = get_head_pose(
                    landmarks[face_index], img_show)

                if args.mask:
                    face_bbox_keypoints = np.concatenate(
                        (landmarks[face_index][:17, :],
                         np.flip(landmarks[face_index][17:27, :], axis=0)),
                        axis=0)

                    pattern = cv2.fillPoly(
                        pattern, [face_bbox_keypoints.astype(np.int32)],
                        (1., 1., 1.))

                for start, end in line_pairs:
                    cv2.line(img_show, reprojectdst[start], reprojectdst[end],
                             (0, 0, 255), 2)

                cv2.putText(img_show,
                            "X: " + "{:7.2f}".format(euler_angle[0, 0]),
                            (20, 20),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0),
                            thickness=2)
                cv2.putText(img_show,
                            "Y: " + "{:7.2f}".format(euler_angle[1, 0]),
                            (20, 50),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0),
                            thickness=2)
                cv2.putText(img_show,
                            "Z: " + "{:7.2f}".format(euler_angle[2, 0]),
                            (20, 80),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 0, 0),
                            thickness=2)

                for landmarks_index in range(landmarks[face_index].shape[0]):
                    x_y = landmarks[face_index][landmarks_index]
                    cv2.circle(img_show, (int(x_y[0]), int(x_y[1])), 3,
                               (222, 222, 222), -1)

            cv2.namedWindow("capture", 0)
            cv2.imshow("capture", img_show)

            if args.mask:
                cv2.namedWindow("masked", 0)
                cv2.imshow("masked", image * pattern)

            key = cv2.waitKey(1)
            if key == ord('q'):
                return
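A usage sketch for the class above (assumption: the surrounding script supplies the imports and the argparse args the methods rely on):

t = Test()
t.printTime()  # start the once-per-second frame-rate report
t.video(0)     # 0 opens the default webcam; a video file path also works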
Code Example #8
    def processVideo(self):
        if self.mode == 'train':
            from lib.core.api.facer import FaceAna
            from lib.core.headpose.pose import get_head_pose, line_pairs
            print('--------------- begin to load model -----------------------')
            facer = FaceAna()
            print('---------------- load model successfully ----------------------')

        print('process video_path ==> %s ' % self.video_path)
        # fil = open(self.label_path, 'a+')
        margin = 25
        crop_H = 400
        crop_W = 400 
        ## open video
        video_capture = cv2.VideoCapture(self.video_path)
        fps = video_capture.get(cv2.CAP_PROP_FPS)
        frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # opencv3.0
        videoWriter = cv2.VideoWriter(self.save_video_path, fourcc, fps, (crop_W, crop_H))

        RGB_mean = np.zeros((frames, 18)) #  RGB mean value of six ROIs
        landmarksAll = np.zeros((frames, 68, 2))

        valid_frame = 0  # count of frames with valid (non-NaN) ROI values; must persist across iterations
        for i in range(frames):
            ret, image = video_capture.read()
            # print(image.shape)
            img_show = image.copy()
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)## img_show is BGR format
            if self.mode == 'train':
                boxes, landmarks, states = facer.run(image) ## image is RGB format
            else:
                self.videoFPS = fps
                self.heartRate = ''
                with open(self.landmark_path, 'r') as fil:
                    lines = fil.readlines()
                line = lines[i].strip().split()
                line = [int(item) for item in line]
                landmarks = np.zeros((1, 68, 2), dtype='int32')
                landmarks[0, :, 0] = line[4::2]
                landmarks[0, :, 1] = line[5::2]

            # assert landmarks.shape[0]==1, 'More than one person or no person in this video'

            crop_img = np.zeros((crop_H, crop_W, 3))
            face_index = 0  ### assume a single face by default
            left_eye = landmarks[face_index][36:42]
            right_eye = landmarks[face_index][42:48]
            nose = landmarks[face_index][27:36]
            face = landmarks[face_index][0:27]

            leftup_eye_h = int(np.min(left_eye[..., 1]))  # top of the left eye
            leftdown_eye_h = int(np.max(left_eye[..., 1]))  # bottom of the left eye
            leftleft_eye_w = int(np.min(left_eye[..., 0]))  # left edge of the left eye
            leftright_eye_w = int(np.max(left_eye[..., 0]))  # right edge of the left eye

            left_eye_box1 = (leftleft_eye_w, leftdown_eye_h+5)
            left_eye_box2 = (leftright_eye_w, 2*leftdown_eye_h-leftup_eye_h+5)

            rightup_eye_h = int(np.min(right_eye[..., 1]))
            rightdown_eye_h = int(np.max(right_eye[..., 1]))
            rightleft_eye_w = int(np.min(right_eye[..., 0]))
            rightright_eye_w = int(np.max(right_eye[..., 0]))

            right_eye_box1 = (rightleft_eye_w, rightdown_eye_h+5)
            right_eye_box2 = (rightright_eye_w, 2*rightdown_eye_h-rightup_eye_h+5)

            noseup_h = int(np.min(nose[..., 1]))
            nosedown_h = int(np.max(nose[..., 1]))
            noseleft_w = int(np.min(nose[..., 0]))
            noseright_w = int(np.max(nose[..., 0]))

            faceup_h = int(np.min(face[..., 1]))
            facedown_h = min(image.shape[0], int(np.max(face[..., 1])))  ## clamp to the image boundary; the chin can be predicted even when it lies outside the frame
            faceleft_w = int(np.min(face[..., 0]))
            faceright_w = int(np.max(face[..., 0]))

            #start_left = int((crop_W-faceright_w+faceleft_w)/2)
            #start_up = int((crop_H - facedown_h + faceup_h)/2)
            #crop_img[start_up:start_up+facedown_h-faceup_h, start_left:start_left+faceright_w-faceleft_w, :] = img_show[faceup_h:facedown_h, faceleft_w:faceright_w, :]
            #videoWriter.write(crop_img.astype('uint8'))
            window_H = facedown_h - faceup_h
            window_W = faceright_w - faceleft_w  ### window_H and window_W give the size of the crop window
            if window_H > window_W:
                ## the crop is taller than it is wide, so widen it
                window_up_h = faceup_h
                window_down_h = facedown_h
                diff = int((window_H-window_W)/2)
                window_left_w = faceleft_w - diff
                window_right_w = window_left_w + window_H
                window_W = window_H
            elif window_H < window_W:
                ## the crop is wider than it is tall, so make it taller
                window_left_w = faceleft_w
                window_right_w = faceright_w
                diff = int((window_W-window_H)/2)
                window_up_h = faceup_h - diff
                window_down_h = window_up_h + window_W
                window_H = window_W
            else:
                window_up_h = faceup_h
                window_down_h =facedown_h
                window_left_w = faceleft_w
                window_right_w = faceright_w
            ### (window_up_h, window_left_w) and (window_down_h, window_right_w) define the crop window
            factor = crop_H/window_H  ## scaling factor
            tmp_img = img_show[window_up_h:window_down_h, window_left_w:window_right_w, :].astype('uint8')
            crop_img = cv2.resize(tmp_img, (crop_H, crop_W),
                                  interpolation=cv2.INTER_CUBIC).astype('uint8')  ## resize with cubic interpolation
            # videoWriter.write(crop_img)
            # plt.imshow(crop_img[..., ::-1])
            # plt.show()

            """
            处理数据
            """
            rela_coordinates = np.zeros((1, 2)) # 由于crop引入的相对坐标
            rela_coordinates[0, 0] = int((window_up_h+window_down_h-crop_H)/2)
            rela_coordinates[0, 1] = int((window_left_w+window_right_w-crop_W)/2)
            # landmarksAll[i, ...] = (landmarks[face_index, :, ::-1] - rela_coordinates)*factor ## 保存相对坐标(h, w),landmarks为(w, h)
            """
            extract region of interest
            """
            roi_1 = img_show[noseup_h-10:nosedown_h,noseleft_w:noseright_w, ::-1] ## BGR->RGB
            roi_2 = img_show[min(leftup_eye_h, rightup_eye_h):max(leftdown_eye_h, rightdown_eye_h), leftright_eye_w:rightleft_eye_w, ::-1] ## BGR->RGB
            roi_3 = img_show[leftdown_eye_h+5:2*leftdown_eye_h-leftup_eye_h+5, leftleft_eye_w:leftright_eye_w, ::-1] ## BGR->RGB
            roi_4 = img_show[rightdown_eye_h+5:2*rightdown_eye_h-rightup_eye_h+5, rightleft_eye_w:rightright_eye_w, ::-1] ## BGR->RGB
            
            mask_6 = np.zeros(img_show.shape[0:2])  ## mask for ROI 6
            mask_6[faceup_h:facedown_h, leftleft_eye_w:rightright_eye_w] = 1.0

            mask_face = np.zeros(img_show.shape[0:2])  ## mask covering the face
            land1 = landmarks[face_index][0:17, ...].reshape(1, 17, 2)
            land2 = landmarks[face_index][26:16:-1, ...].reshape(1, 10, 2)
            land = np.concatenate([land1, land2], axis=1).astype('int32')  ## face-contour landmarks in counter-clockwise order
            cv2.polylines(mask_face, land, 1, 255)
            cv2.fillPoly(mask_face, land, 255)
            mask_face = mask_face/255.0
            
            roi_5 = img_show.transpose((2, 0, 1))[::-1, ...] ##BGR->RGB
            roi_5 = roi_5*mask_face
            roi_5 = roi_5.reshape((3, -1))

            mask_6 = mask_6*mask_face
            roi_6 = img_show.transpose((2, 0, 1))[::-1, ...] ## BGR->RGB
            roi_6 = roi_6*mask_6
            roi_6 = roi_6.reshape((3, -1))
            """
            计算RGB平均值
            """
            RGB_1 = np.mean(np.reshape(roi_1/255.0, (-1, 3)), axis=0)
            RGB_2 = np.mean(np.reshape(roi_2/255.0, (-1, 3)), axis=0)
            RGB_3 = np.mean(np.reshape(roi_3/255.0, (-1, 3)), axis=0)
            RGB_4 = np.mean(np.reshape(roi_4/255.0, (-1, 3)), axis=0)
            RGB_5 = np.sum(roi_5/255.0, axis=1)/np.sum(mask_face)
            RGB_6 = np.sum(roi_6/255.0, axis=1)/np.sum(mask_6)
            RGB = np.concatenate([RGB_1, RGB_2, RGB_3, RGB_4, RGB_5, RGB_6])
            if np.sum(np.isnan(RGB)) == 0: ##valid value
                landmarksAll[valid_frame, ...] = (landmarks[face_index, :, ::-1] - rela_coordinates)*factor  ## store relative (h, w) coordinates; landmarks are (w, h)
                RGB_mean[valid_frame, ...] = RGB ## save valid RGB
                videoWriter.write(crop_img)
                valid_frame += 1

        # videoWriter.release() ## save cropped video
        video_capture.release()
        print('Crop video successfully to ==> %s' % self.save_video_path)
        """
        保存信息
        """
        with open(self.label_path, 'a+') as fil:
            """
            save label
            """
            fil.write('video-%d, ' % self.video_index)
            fil.write(self.save_video_path+', ')
            fil.write(self.save_npz_path+', ')
            fil.write(str(self.videoFPS))
            fil.write(', ')
            fil.write(str(self.heartRate))
            fil.write('\n')
        print('save label ==> %s successfully' % self.label_path)
        np.savez(self.save_npz_path, landmarks=landmarksAll, RGB_mean=RGB_mean)
        print("save npz ==> %s successfully" % self.save_npz_path)
Code Example #9
import sys
import cv2
import numpy as np

sys.path.append('../../GitHub_Face_Toolkit/Peppa_Pig_Face_Engine')
from lib.core.api.facer import FaceAna
from face_alignment.align_trans import get_reference_facial_points, warp_and_crop_face

facer = FaceAna()

landmarks_index = [68, 69, 30, 48, 54]  # the 5 points picked from the 68-keypoint layout

crop_version = 'v2'
crop_size = 128
scale = crop_size / 112.0  # scale factor relative to the reference size of 112

reference = get_reference_facial_points(default_square=True)

if crop_version == 'v2':
    reference = (reference - 112.0 / 2) * 1.20 + 112.0 / 2
    reference[:, 1] = reference[:, 1] + -20

reference = reference * scale

cap = cv2.VideoCapture(0)  # defaults to w=640, h=480
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

while True:
    ret, img_raw = cap.read()
    img_h, img_w = img_raw.shape[:2]
Code Example #10
import sys
sys.path.append('../Peppa_Pig_Face_Engine-master/')
sys.path.append('../')
from lib.core.api.facer import FaceAna

from lib.core.headpose.pose import get_head_pose, line_pairs

facer = FaceAna()

import torch
import torchvision.transforms as transforms
from face_modules.model import Backbone
from network.AEI_Net import *
import cv2
import PIL.Image as Image
import numpy as np
from face_modules.mtcnn_pytorch.src.align_trans import *


device = torch.device('cuda')

arcface = Backbone(50, 0.6, 'ir_se').to(device)
arcface.eval()
arcface.load_state_dict(torch.load('../face_modules/model_ir_se50.pth', map_location=device), strict=False)

test_transform = transforms.Compose([
    transforms.ColorJitter(0.1, 0.1, 0.1, 0.01),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
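A small sketch of how test_transform would typically be applied before querying the ArcFace backbone; the aligned 112x112 crop and its file name are assumptions, not part of the snippet:

face = Image.open('aligned_112x112.jpg').convert('RGB')     # hypothetical aligned face crop
face_tensor = test_transform(face).unsqueeze(0).to(device)  # (1, 3, 112, 112), roughly normalized to [-1, 1]
with torch.no_grad():
    embedding = arcface(face_tensor)  # identity embedding consumed by AEI-Net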