def main_camera():
    # Initialize the stereo face anti-spoofing CNN
    modelpath = "./checkpoint_resnet/"
    hpr = stereoSpoofingRecognition(modelpath, 0.7)
    centerface = CenterFace(DETHEIGHT, DETWIDTH)

    test_video(centerface, hpr, 0, calibed=True)
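
# A minimal entry-point sketch (an assumption, not part of the original
# example): run the stereo camera demo when this script is executed directly.
if __name__ == "__main__":
    main_camera()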
Example #2
def test_image_tensorrt():
    frame = cv2.imread("prj-python/000388.jpg")
    # Set the input height and width
    h, w = 480, 640  # must be 480 x 640
    landmarks = False
    centerface = CenterFace(landmarks=landmarks)
    if landmarks:
        dets, lms = centerface(frame, h, w, threshold=0.35)
    else:
        dets = centerface(frame, h, w, threshold=0.35)
    print("count = ", len(dets))

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(
            frame,
            (int(boxes[0]), int(boxes[1])),
            (int(boxes[2]), int(boxes[3])),
            (2, 255, 0),
            1,
        )
    if landmarks:
        for lm in lms:
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)
    # Save the result image
    cv2.imwrite("res_000388.jpg", frame)
    # cv2.imshow("out", frame)
    # cv2.waitKey(0)  # only needed when the imshow call above is enabled
Example #3
def test_image():
    frame = cv2.imread('000388.jpg')
    h, w = frame.shape[:2]
    landmarks = True
    centerface = CenterFace(h, w, landmarks=landmarks)
    if landmarks:
        dets, lms = centerface(frame, threshold=0.35)
    else:
        dets = centerface(frame, threshold=0.35)

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        for lm in lms:
            cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)

    print('python face num:', len(dets))
    cv2.imshow('python', frame)
    cv2.waitKey(0)
def test_widerface():
    Path = 'widerface/WIDER_val/images/'
    wider_face_mat = sio.loadmat(
        'widerface/wider_face_split/wider_face_val.mat')
    event_list = wider_face_mat['event_list']
    file_list = wider_face_mat['file_list']
    save_path = 'save_out/'

    for index, event in enumerate(event_list):
        file_list_item = file_list[index][0]
        im_dir = event[0][0]
        # print(save_path + im_dir)
        if not os.path.exists(save_path + im_dir):
            os.makedirs(save_path + im_dir)
        landmarks = True
        centerface = CenterFace(landmarks=landmarks)
        for num, file in enumerate(file_list_item):
            im_name = file[0][0]
            zip_name = '%s/%s.jpg' % (im_dir, im_name)
            print(os.path.join(Path, zip_name))
            img = cv2.imread(os.path.join(Path, zip_name))
            h, w = img.shape[:2]
            if landmarks:
                dets, lms = centerface(img, h, w, threshold=0.05)
            else:
                dets = centerface(img, h, w, threshold=0.05)
            f = open(save_path + im_dir + '/' + im_name + '.txt', 'w')
            f.write('{:s}\n'.format('%s/%s.jpg' % (im_dir, im_name)))
            f.write('{:d}\n'.format(len(dets)))
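            # One line per detection: x, y, width, height, score (WIDER FACE submission format)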
            for b in dets:
                x1, y1, x2, y2, s = b
                f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(
                    x1, y1, (x2 - x1 + 1), (y2 - y1 + 1), s))
            f.close()
            print('event:%d num:%d' % (index + 1, num + 1))
Example #5
def test_image(landmarks=False):
    frame = cv2.imread('imgs/17.jpg')
    # frame = cv2.resize(frame, (640, 640))
    h, w = frame.shape[:2]
    centerface = CenterFace(h, w, landmarks=landmarks)
    if landmarks:
        dets, lms = centerface(frame, threshold=0.3)
    else:
        dets = centerface(frame, threshold=0.3)

    for det in dets:

        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        for lm in lms:
            cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)
    cv2.imwrite('centerface.jpg', frame)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
Example #6
def camera(count_im):
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace()
    global org_image
    while True:
        ret, frame = cap.read()
        org_frame = frame.copy()
        croped_face = frame.copy()
        dets, lms = centerface(frame, h, w, threshold=0.35)
        org_image = frame.copy()
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
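            # Crop the detected face from the unannotated copy (the last detection wins if several faces are found)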
            croped_face = org_frame[int(boxes[1]):int(boxes[3]),
                                    int(boxes[0]):int(boxes[2])]

        cv2.imshow('out', frame)
        cv2.imshow('face', croped_face)
        cv2.imwrite(save_path + 'img_{}.jpg'.format(count_im), croped_face)  # save_path is assumed to be defined at module level
        count_im += 1
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
Example #7
def test_image(img_path):
    """[Face detection]

    Args:
        img_path ([str]): [path to the image]
    """
    frame = cv2.imread(img_path)
    h, w = frame.shape[:2]
    landmarks = True
    centerface = CenterFace(landmarks=landmarks)
    if landmarks:
        dets, lms = centerface(frame, h, w, threshold=0.35)
    else:
        dets = centerface(frame, h, w, threshold=0.35)

    for det in dets:
        #boxes, score = det[:4], det[4]
        boxes = det[:4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 2)
    if landmarks:
        for lm in lms:
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(32, 32))
    plt.imshow(frame)
    #plt.savefig('result_' + img_path, bbox_inches='tight')
    plt.show()
Example #8
def detect_face_by_centerface(img) -> list:
    h, w = img.shape[:2]
    cf = CenterFace(landmarks=True)
    faces, landmarks = cf(img, h, w, threshold=0.5)
    boxes = []
    for face in faces:
        (x1, y1, x2, y2), score = face[:4], face[4]
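        # Reorder to (top, right, bottom, left), matching the convention used by e.g. the face_recognition package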
        box = [int(y1), int(x2), int(y2), int(x1)]
        boxes.append(box)
    return boxes
Example #9
def test_image():
    frame = cv2.imread('000388.jpg')
    h, w = frame.shape[:2]
    centerface = CenterFace(h, w)
    dets = centerface(frame, threshold=0.5)
    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
Example #10
def test_image(image_path, model_path):
    frame = cv2.imread(image_path)
    h, w = frame.shape[:2]
    landmarks = True
    centerface = CenterFace(model_path=model_path, landmarks=landmarks)
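    # transform() appears to configure the detector for this input size; exact behavior depends on the CenterFace wrapper in use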
    centerface.transform(h, w)
    if landmarks:
        dets, lms = centerface(frame, threshold=0.35)
    else:
        dets = centerface(frame, threshold=0.35)

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        for lm in lms:
            cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
Example #11
def test_image():
    frame = cv2.imread('000388.jpg')
    h, w = frame.shape[:2]
    centerface = CenterFace(h, w)
    dets, lms = centerface(frame, threshold=0.35)
    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    for lm in lms:
        cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
        cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
        cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
        cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
        cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
Example #12
def camera():
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace(h, w)
    while True:
        ret, frame = cap.read()
        dets = centerface(frame, threshold=0.5)
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
        cv2.imshow('out', frame)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
Example #13
def video():
    cap = cv2.VideoCapture('Manif.mp4')
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace()
    fps = 48  # output video frame rate
    size = (1920, 1080)  # size of the frames written to the output video;
    # can be adjusted with cv2.resize() if the input size differs
    video = cv2.VideoWriter("result01.mp4",
                            cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps,
                            size)

    cnt_frame = 1

    output_path = "./Output/Manifcsv.csv"
    file = open(output_path, "w")
    writer = csv.writer(file)
    writer.writerow(["Frame", "Faces list"])

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        dets, lms = centerface(frame, h, w, threshold=0.35)
        face_frame = []
        for det in dets:
            boxes, score = det[:4], det[4]
            face_frame.append(boxes)
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
        #for lm in lms:
        #   for i in range(0, 5):
        #      cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2, (0, 0, 255), -1)
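        # Record the frame index together with its list of face boxes in the CSV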
        writer.writerow([cnt_frame, face_frame])

        cnt_frame += 1
        video.write(frame)
        cv2.imshow('out', frame)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    #dataframe = pd.DataFrame(columns = cnt_frame, data = face_list)
    #dataframe.to_csv('./Output/Manifcsv.csv',encoding = 'gbk')
    cap.release()
    video.release()
    file.close()
Example #14
def camera():
    cap = cv2.VideoCapture("prj-python/demo.mp4")
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        dets, lms = centerface(frame, h, w, threshold=0.35)
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
        for lm in lms:
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)
        cv2.imshow('out', frame)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(50) & 0xFF == ord('q'):
            break
    cap.release()
def test_image():
    frame = cv2.imread('000388.jpg')
    h, w = frame.shape[:2]
    landmarks = True
    centerface = CenterFace(landmarks=landmarks)
    if landmarks:
        dets, lms = centerface(frame, h, w, threshold=0.35)
    else:
        dets = centerface(frame, h, w, threshold=0.35)

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        for lm in lms:
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
Example #16
def test_image_tensorrt():
    frame = cv2.imread('../prj-python/000388.jpg')
    h, w = 480, 640  # must be 480* 640
    landmarks = True
    centerface = CenterFace()
    if landmarks:
        dets, lms = centerface(frame, h, w, threshold=0.35)
        print("count = ", len(dets))
    else:
        dets = centerface(frame, h, w, threshold=0.35)

    for det in dets:
        boxes, score = det[:4], det[4]
        cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                      (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
    if landmarks:
        for lm in lms:
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)
    cv2.imshow('out', frame)
    cv2.waitKey(0)
def camera():
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace()
    global org_image
    while True:
        ret, frame = cap.read()
        dets, lms = centerface(frame, h, w, threshold=0.35)
        org_image = frame.copy()
        hsvframe = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)[:, :, 0]
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
        for lm in lms:
            face_points = []
            for i in range(0, 5):
                cv2.circle(frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2,
                           (0, 0, 255), -1)
                face_points.append([int(lm[i * 2]), int(lm[i * 2 + 1])])

            # Extract color of region
            avg_top = calculate_avg_color(hsvframe, face_points[0:2])
            avg_bot = calculate_avg_color(hsvframe, face_points[2:5])
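            # Heuristic: if the average hue around the eyes and around the nose/mouth is similar,
            # assume the lower face is uncovered and prompt the user to wear a mask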
            if avg_top and avg_bot:
                diff = np.abs(avg_top - avg_bot) / max(avg_top, avg_bot) * 100
                print(diff)
                if diff <= 30:
                    # "Vui long mang khau trang" = "Please wear a mask" (Vietnamese)
                    cv2.putText(frame, "Vui long mang khau trang ",
                                (int(boxes[0]), int(boxes[1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)

        cv2.imshow('out', frame)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
Example #18
def test_images(imglist, output_dir):

    num_images = len(imglist)

    for i in range(num_images):
        # print('img{}:{}'.format(i,imglist[i]))
        # im = cv2.imread(imglist[i])
        frame = cv2.imread(imglist[i])
        myboxes = []
        assert frame is not None
        h, w = frame.shape[:2]
        centerface = CenterFace(h, w)
        dets, lms = centerface(frame, threshold=0.35)
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])),
                          (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
            # print( (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])))
            box = [
                'Faces', 0,
                (int(boxes[0]), int(boxes[1]), int(boxes[2]), int(boxes[3]))
            ]
            myboxes.append(box)

        # for lm in lms:
        #     cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
        #     cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
        #     cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
        #     cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
        #     cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)
        cv2.imshow('out', frame)
        cv2.imwrite(
            output_dir + os.path.basename(imglist[i]).split('.')[0] + '.jpg',
            frame)
        write_xml(imglist[i], w, h, myboxes, output_dir)
        cv2.waitKey(500)
Example #19
def camera():
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    centerface = CenterFace(h, w)
    while True:
        ret, frame = cap.read()
        dets, lms = centerface(frame, threshold=0.5)
        for det in dets:
            boxes, score = det[:4], det[4]
            cv2.rectangle(frame, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
        for lm in lms:
            cv2.circle(frame, (int(lm[0]), int(lm[1])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[2]), int(lm[3])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[4]), int(lm[5])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[6]), int(lm[7])), 2, (0, 0, 255), -1)
            cv2.circle(frame, (int(lm[8]), int(lm[9])), 2, (0, 0, 255), -1)

        print('rknn face num:', len(dets))
        cv2.imshow('rknn', frame)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
Example #20
import cv2
import numpy as np
from PIL import ImageFile, Image
from centerface import CenterFace


net = CenterFace()

def ensure_cv(im):
    # Accept a file path, a PIL image, or a numpy/OpenCV image and
    # return a BGR numpy array.
    if isinstance(im, str):
        im = cv2.imread(im)
    elif isinstance(im, ImageFile.ImageFile):
        im = np.array(im)[:, :, :3]
        im = im[:, :, ::-1]  # RGB -> BGR
    return im

def detect(im):
    global net
    im = ensure_cv(im)
    h, w = im.shape[:2]
    dets, lms = net(im, h, w, threshold=0.35)
    res = []
    for (det, lm) in zip(dets, lms):
        det = list(map(lambda x: round(x, 2), det))
        lm = list(map(lambda x: round(x, 2), lm))
        res.append([det, lm])
    return res


def align(im, tsize=128, desiredLeftEye=0.35):
    im = ensure_cv(im)
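
# A minimal usage sketch for detect() above (an assumption, not part of the
# original snippet); "images/demo.jpg" is a placeholder path.
if __name__ == "__main__":
    for det, lm in detect("images/demo.jpg"):
        print("box:", det[:4], "score:", det[4], "landmarks:", lm)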
Example #21
def __init__(self, event_interval=6):
    self.controller = Controller(event_interval=event_interval)
    self.detector = CenterFace()
    self.trackers = []
import cv2
import scipy.io as sio
import os
import numpy as np
from tqdm import tqdm
import glob
from centerface import CenterFace
landmarks = True
centerface = CenterFace(landmarks=landmarks)


#
def test_image(img_path, save_path):
    #
    img_src = cv2.imread(img_path)
    img_src = np.rot90(img_src, axes=(1, 0))
    [img_h, img_w, img_c] = img_src.shape

    dets, lms = centerface(img_src, img_h, img_w, threshold=0.35)
    det = dets[0]
    boxes, score = det[:4], det[4]
    roi = [boxes[0], boxes[1], boxes[2], boxes[3]]
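    # Below, the shorter side of the box is expanded so the face crop becomes square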

    w = int(roi[2] - roi[0] + 1)
    h = int(roi[3] - roi[1] + 1)
    if w > h:
        roi[1] = roi[1] - (w - h) // 2
        roi[3] = roi[3] + (w - h - (w - h) // 2)
    elif h > w:
        roi[0] = roi[0] - (h - w) // 2
        roi[2] = roi[2] + (h - w - (h - w) // 2)
Example #23
import cv2

from centerface import CenterFace

image = cv2.imread("images/demo.jpg")
h, w = image.shape[:2]

centerface = CenterFace()
dets, lms = centerface(image, h, w, threshold=0.35)

for det in dets:
    boxes, score = det[:4], det[4]
    cv2.rectangle(
        image,
        (int(boxes[0]), int(boxes[1])),
        (int(boxes[2]), int(boxes[3])),
        (2, 255, 0),
        1,
    )

for lm in lms:
    for i in range(0, 5):
        cv2.circle(image, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2, (0, 0, 255),
                   -1)

cv2.imshow("", image)
cv2.waitKey(0)
Example #24
import os

import cv2
import torch
from centerface import CenterFace

save_folder = "./prediction/"
dataset_folder = "../widerface_val/images/"

if __name__ == '__main__':
    torch.set_grad_enabled(False)
    testset_folder = dataset_folder
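    # Strip the trailing "images/" from dataset_folder to locate wider_val.txt beside it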
    testset_list = dataset_folder[:-7] + "wider_val.txt"

    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split()
    num_images = len(test_dataset)
    centerface = CenterFace(landmarks=True)

    for i, img_name in enumerate(test_dataset):
        ############################# Add face detection here#######################################
        image_path = testset_folder + img_name
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        dets, lms = centerface(img, h, w, threshold=0.35)
        ############################################################################################
        save_name = save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(save_name, "w") as fd:
            bboxs = dets
            file_name = os.path.basename(save_name)[:-4] + "\n"