Example #1
def test_onet(inoutDir, outputDir, model):
    pnet, rnet, onet_jiang = create_mtcnn_net(
        p_model_path="./original_model/pnet_epoch.pt",
        r_model_path="./original_model/rnet_epoch.pt",
        o_model_path="./original_model/" + model + ".pt",
        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet_jiang,
                                   min_face_size=24)

    files = os.listdir(inoutDir)
    i = 0
    for image in files:
        i += 1
        image = os.path.join(inoutDir, image)

        img = cv2.imread(image)
        img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        landmarks2_jiang = mtcnn_detector.detect_onet_raw(img)

        vis_face_test(img_bg, landmarks2_jiang,
                      outputDir + model + "-" + str(i) + ".jpg")
        if i == 50:
            break
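A minimal invocation sketch for test_onet above (directory and model name are hypothetical):

# Hypothetical arguments: an input image directory, an output directory
# (with trailing slash, since it is concatenated directly), and the O-Net
# file name under ./original_model/ without the .pt extension.
test_onet("./lfpw_test/", "./output/", "onet_epoch")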
Example #2
def gen_onet_data(data_dir,
                  anno_file,
                  pnet_model_file,
                  rnet_model_file,
                  prefix_path='',
                  use_cuda=True,
                  vis=False):

    pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                     r_model_path=rnet_model_file,
                                     use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)

    imagedb = ImageDB(anno_file, mode="test", prefix_path=prefix_path)
    imdb = imagedb.load_imdb()
    image_reader = TestImageLoader(imdb, 1, False)

    all_boxes = list()
    batch_idx = 0

    print('size:%d' % image_reader.size)
    for databatch in image_reader:
        if batch_idx % 50 == 0:
            print("%d images done" % batch_idx)

        im = databatch

        t = time.time()

        # pnet detection = [x1, y1, x2, y2, score, reg]
        p_boxes, p_boxes_align = mtcnn_detector.detect_pnet(im=im)

        # rnet detection
        boxes, boxes_align = mtcnn_detector.detect_rnet(im=im,
                                                        dets=p_boxes_align)

        if boxes_align is None:
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue
        if vis:
            rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
            vision.vis_two(rgb_im, boxes, boxes_align)

        t1 = time.time() - t
        t = time.time()
        all_boxes.append(boxes_align)
        batch_idx += 1

    save_path = './model_store'

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
    with open(save_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    gen_onet_sample_data(data_dir, anno_file, save_file, prefix_path)
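A hedged usage sketch for gen_onet_data; every path below is a placeholder:

# Placeholder paths: run the trained P-Net and R-Net over the annotated
# images, pickle the surviving boxes, then cut O-Net training samples.
gen_onet_data(data_dir='./data/wider',
              anno_file='./anno_store/wide_anno_train.txt',
              pnet_model_file='./model_store/pnet_epoch.pt',
              rnet_model_file='./model_store/rnet_epoch.pt',
              use_cuda=False)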
Example #3
def gen_rnet_data(data_dir, anno_dir, pnet_model_file, use_cuda=True):
    ''' Generate the train data of RNet with trained-PNet '''

    # load trained pnet model
    pnet, _, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                  use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, min_face_size=12)

    # load original_anno_file, length = 12880
    anno_file = os.path.join(anno_dir, 'anno_store/wide_anno_train.txt')
    # TODO :: [local_wide_anno, wide_anno_train]
    imagedb = ImageDB(anno_file, mode='test', prefix_path='')
    imdb = imagedb.load_imdb()

    image_reader = TestImageLoader(imdb, 1, False)
    print('size:%d' % image_reader.size)

    batch_idx, all_boxes = 0, list()

    for databatch in image_reader:

        if (batch_idx + 1) % 100 == 0:
            print("%d images done" % (batch_idx + 1))
        im = databatch

        # obtain boxes and aligned boxes
        boxes_align = mtcnn_detector.detect_pnet(im=im)  # Time costly

        if boxes_align is None:
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue

        # if vis:
        #     rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
        #     vision.vis_two(rgb_im, boxes[:100, :], boxes_align[:100, :])

        all_boxes.append(boxes_align)
        batch_idx += 1

    save_path = os.path.join(anno_dir, 'rnet')

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
    with open(save_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    gen_rnet_sample_data(data_dir, anno_dir, save_file)
Example #4
def index():
    pnet, rnet, onet = create_mtcnn_net(
        p_model_path="./original_model/pnet_epoch.pt",
        r_model_path="./original_model/rnet_epoch.pt",
        o_model_path="./original_model/onet_epoch.pt",
        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24)

    #get_json=flask.request.get_json(force=True)

    print(request.data)
    base_data = request.json['image']

    img = base64_to_image(base_data)
    bboxs, landmarks = mtcnn_detector.detect_face(img)

    # initialize the JSON response
    res = {}
    faces = {}
    if bboxs.shape[0] < 1:
        res["success"] = False
        res["faces_detected"] = faces
        return flask.jsonify(res)
    else:
        res["success"] = True

    # handle the case of multiple faces in one image
    for i in range(bboxs.shape[0]):
        x1 = int(bboxs[i][0])
        x2 = int(bboxs[i][2])
        y1 = int(bboxs[i][1])
        y2 = int(bboxs[i][3])
        face = img[y1:y2, x1:x2]

        face_name = "face_" + str(i)
        return_base64 = image_to_base64(face)
        faces[face_name] = return_base64

    res["faces_detected"] = faces
    return flask.jsonify(res)
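A client-side sketch for exercising the endpoint above. The URL is an assumption; per the handler, the server expects a JSON body with a base64-encoded "image" field and answers with base64-encoded face crops:

import base64
import requests

with open('test.jpg', 'rb') as fh:
    payload = {'image': base64.b64encode(fh.read()).decode('utf-8')}
# Hypothetical address of the Flask app above.
resp = requests.post('http://localhost:5000/', json=payload)
result = resp.json()
print(result['success'], list(result['faces_detected'].keys()))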
Example #5
def test(inoutDir, outputDir, model):  # original P-Net/R-Net + self-trained O-Net: show and save the detected images
    pnet, rnet, onet_jiang = create_mtcnn_net(
        p_model_path="./original_model/pnet_epoch.pt",
        r_model_path="./original_model/rnet_epoch.pt",
        o_model_path="./original_model/" + model + ".pt",
        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet_jiang,
                                   min_face_size=24)
    files = os.listdir(inoutDir)
    i = 0
    for image in files:
        i += 1
        image = os.path.join("./lfpw_test/", image)

        img = cv2.imread(image)
        img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bboxs, landmarks1 = mtcnn_detector.detect_face(img)  # run the original networks on the original image

        vis_face(img_bg, bboxs, landmarks1,
                 outputDir + model + "-" + str(i) + ".jpg")  # save the image
Example #6
    def detect(self):
        pnet, rnet, onet = create_mtcnn_net(p_model_path=self.p_net_m,
                                            r_model_path=self.r_net_m,
                                            o_model_path=self.o_net_m,
                                            use_cuda=True)
        mtcnn_detector = MtcnnDetector(pnet=pnet,
                                       rnet=rnet,
                                       onet=onet,
                                       min_face_size=24,
                                       threshold=[0.1, 0.1, 0.1])

        event_list = os.listdir(self.image_dir)
        for event in event_list:
            print(event)
            event_dir = os.path.join(self.image_dir, event)
            res_dir = os.path.join(self.result_dir, event)
            if not os.path.exists(res_dir):
                os.makedirs(res_dir)
            images_list = os.listdir(event_dir)
            for images in images_list:
                images_path = os.path.join(event_dir, images)
                img = cv2.imread(images_path)
                bboxs, landmarks = mtcnn_detector.detect_face(img)
                if bboxs.shape[0] != 0:
                    bboxs[:, 2] = bboxs[:, 2] - bboxs[:, 0]
                    bboxs[:, 3] = bboxs[:, 3] - bboxs[:, 1]
                    bboxs[:, :4] = np.round(bboxs[:, :4])
                """ print(bboxs)
                save_name = 'r_304.jpg'
                vis_face(img,bboxs,landmarks, save_name) """
                fpath = os.path.join(res_dir, images[:-4] + '.txt')
                with open(fpath, 'w') as f:
                    f.write(images[:-4] + '\n')
                    f.write(str(bboxs.shape[0]) + '\n')
                    for i in range(bboxs.shape[0]):
                        f.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.3f}\n'.format(
                            bboxs[i, 0], bboxs[i, 1], bboxs[i, 2], bboxs[i, 3],
                            bboxs[i, 4]))
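The .txt files written above follow the WIDER FACE evaluation layout: image name, face count, then one "x y w h score" line per face. A minimal parser sketch, assuming exactly that layout:

def read_wider_result(txt_path):
    # Parse one result file: line 1 is the image name, line 2 the box
    # count, then one "x y w h score" line per detection.
    with open(txt_path) as f:
        name = f.readline().strip()
        n = int(f.readline().strip())
        boxes = [list(map(float, f.readline().split())) for _ in range(n)]
    return name, boxes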
Example #7
import argparse
from mtcnn.core.detect import MtcnnDetector, create_mtcnn_net
import cv2

pnet, rnet, _ = create_mtcnn_net(
    p_model_path='/home/ding/code/face/pnet_epoch.pt',
    r_model_path='rnet_epoch.pt',
    use_cuda=False)
mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)

# cap = cv2.VideoCapture(0)
# while (True):
#     ret, frame = cap.read()  # read one frame
#     frame = cv2.resize(frame,(640,360))
#     frame = cv2.imread('face.jpg')
#     boxes, boxes_align = mtcnn_detector.detect_pnet(im=frame)
#     rboxes, rboxes_align = mtcnn_detector.detect_rnet(im=frame, dets=boxes_align)
#     oboxes,olandmark = mtcnn_detector.detect_onet(im=frame,dets=rboxes_align)
#     print(oboxes)
#     for box in oboxes:
#         cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 0, 128), 2)
#     cv2.imshow('Face Recognition', frame)
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
# cap.release()  # release the camera
# cv2.destroyAllWindows()

frame = cv2.imread(
    '/web2/ding/face/WIDER_train/images/12--Group/12_Group_Large_Group_12_Group_Large_Group_12_245.jpg'
)
boxes, boxes_align = mtcnn_detector.detect_pnet(im=frame)
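To inspect the P-Net stage on its own, the aligned candidates can be drawn directly; a sketch, taking the [x1, y1, x2, y2, ...] row layout from the commented webcam loop above:

if boxes_align is not None:
    for box in boxes_align:
        cv2.rectangle(frame, (int(box[0]), int(box[1])),
                      (int(box[2]), int(box[3])), (255, 0, 128), 2)
    cv2.imwrite('pnet_result.jpg', frame)  # hypothetical output file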
Example #8
def gen_rnet_data(data_dir,
                  anno_file,
                  pnet_model_file,
                  prefix_path='',
                  use_cuda=True,
                  vis=False):
    """
    :param data_dir: train data
    :param anno_file:
    :param pnet_model_file:
    :param prefix_path:
    :param use_cuda:
    :param vis:
    :return:
    """

    # load trained pnet model
    pnet, _, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                  use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, min_face_size=12)

    # load original_anno_file, length = 12880
    imagedb = ImageDB(anno_file, mode="test", prefix_path=prefix_path)
    imdb = imagedb.load_imdb()
    image_reader = TestImageLoader(imdb, 1, False)

    all_boxes = list()
    batch_idx = 0

    print('size:%d' % image_reader.size)
    for databatch in image_reader:
        if batch_idx % 100 == 0:
            print("%d images done" % batch_idx)
        im = databatch

        t = time.time()

        # obtain boxes and aligned boxes
        boxes, boxes_align = mtcnn_detector.detect_pnet(im=im)
        if boxes_align is None:
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue
        if vis:
            rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
            vision.vis_two(rgb_im, boxes, boxes_align)

        t1 = time.time() - t
        t = time.time()
        all_boxes.append(boxes_align)
        batch_idx += 1
        # if batch_idx == 100:
        # break
        # print("shape of all boxes {0}".format(all_boxes))
        # time.sleep(5)

    # save_path = model_store_path()
    # './model_store'
    save_path = './model_store'

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
    with open(save_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    gen_rnet_sample_data(data_dir, anno_file, save_file, prefix_path)
Example #9
import cv2
import os
from mtcnn.core.detect import create_mtcnn_net, MtcnnDetector
from mtcnn.core.vision import vis_face

CURRENT_DIR = os.path.dirname(__file__)

PNET_PATH = os.path.join(CURRENT_DIR, 'model_for_use', 'pnet_epoch.pt')
RNET_PATH = os.path.join(CURRENT_DIR, 'model_for_use', 'rnet_epoch.pt')
ONET_PATH = os.path.join(CURRENT_DIR, 'model_for_use', 'onet_epoch.pt')

if __name__ == '__main__':

    pnet, rnet, onet = create_mtcnn_net(p_model_path=PNET_PATH,
                                        r_model_path=RNET_PATH,
                                        o_model_path=ONET_PATH,
                                        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet, min_face_size=24)

    img = cv2.imread(CURRENT_DIR + '/eu1_input.jpg')
    img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #b, g, r = cv2.split(img)
    #img2 = cv2.merge([r, g, b])

    bboxs, landmarks = mtcnn_detector.detect_face(img)
    # print box_align
    save_name = CURRENT_DIR + '/eu1_output.png'
    vis_face(img_bg,bboxs,landmarks, save_name)
Example #10
def dete_signal_video():

    eye_class_dict = {0: "open_eye", 1: "close_eye", 2: "other"}
    point_nums = 24
    threshold = [0.6, 0.7, 0.7]
    data_trans = Transforms.Compose([
        Transforms.Resize((24, 24)),
        Transforms.ToTensor(),
        Transforms.Normalize((0.45, 0.448, 0.455), (0.082, 0.082, 0.082)),
        # Transforms.Normalize((0.407, 0.405, 0.412), (0.087, 0.087, 0.087)),
    ])
    mixnet = MixNet(input_size=(24, 24), num_classes=3)
    # eye_class_dict = {0:"open_eye",1:"close_eye"}
    # weight_dict = torch.load("weight/signal_eye/Mixnet_epoch_29.pth")
    weight_dict = torch.load(
        "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/weight/relabel_04_mix_SGD_mutillabel_24_24_20210302/Mixnet_epoch_49.pth"
    )
    new_state_dict = OrderedDict()
    for k, v in weight_dict.items():
        name = k[7:]
        new_state_dict[name] = v

    mixnet.load_state_dict(new_state_dict)
    # stat(net,(3,48,48))
    mixnet.to('cuda:0')
    mixnet.eval()

    pnet, rnet, onet = create_mtcnn_net(
        p_model_path=r'model_store/final/pnet_epoch_19.pt',
        r_model_path=r'model_store/final/rnet_epoch_7.pt',
        o_model_path=r'model_store/final/onet_epoch_92.pt',
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=threshold)
    videos_root_path = 'test_video/hhh/02_65_6504_0_be4ba2aeac264ed992aae74c15b91b18.mp4'
    save_path_root = 'result_video/debug_test.avi'

    cap = cv2.VideoCapture(videos_root_path)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # tpa
    fname = os.path.splitext(os.path.split(videos_root_path)[1])[0]
    save_path = os.path.join(
        "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/result_video/data(2)",
        fname + ".avi")
    out = cv2.VideoWriter(save_path_root, fourcc, fps, size)
    while True:
        ret, frame = cap.read()

        if ret:
            copy_frame = frame.copy()
            left_right_eye = []
            bboxs, landmarks, wearmask = mtcnn_detector.detect_face(frame,
                                                                    rgb=True)
            temp_path, trmp_name = os.path.split(save_path)
            # trmp_name = os.path.splitext(trmp_name)[0] + "{:04d}.jpg".format(img_count)
            # tsave_path = os.path.join(temp_path, trmp_name)
            if landmarks is not None:
                eye_wild_buf = []
                for i in range(landmarks.shape[0]):
                    landmarks_one = landmarks[i, :]
                    landmarks_one = landmarks_one.reshape((point_nums, 2))
                    left_eye = np.array(landmarks_one[[6, 8, 10, 11, 14], :])
                    xmin = np.min(left_eye[:, 0])
                    ymin = np.min(left_eye[:, 1])
                    xmax = np.max(left_eye[:, 0])
                    ymax = np.max(left_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])

                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)

                    right_eye = np.array(landmarks_one[[7, 9, 12, 13, 15], :])
                    xmin = np.min(right_eye[:, 0])
                    ymin = np.min(right_eye[:, 1])
                    xmax = np.max(right_eye[:, 0])
                    ymax = np.max(right_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])
                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)
                    # draw the eye landmark points
                    # for j in [*left_eye,*right_eye]:
                    #     cv2.circle(frame, (int(j[0]), int(j[1])), 2, (255, 0, 0), -1)

                crop_img = []
                for xmin, ymin, xmax, ymax in left_right_eye:
                    w, h = xmax - xmin, ymax - ymin
                    # expand the eye box (training used a random 0.05-0.15 margin; k is fixed here)
                    k = 0.1
                    ratio = h / w
                    if ratio > 1:
                        ratio = ratio - 1
                        xmin -= (ratio / 2 * w + k * h)
                        ymin -= (k * h)
                        xmax += (ratio / 2 * w + k * h)
                        ymax += (k * h)

                    else:
                        ratio = w / h - 1
                        xmin -= (k * w)
                        ymin -= (ratio / 2 * h + k * w)
                        xmax += (k * w)
                        ymax += (ratio / 2 * h + k * w)
                    eye_wild_buf.append(w)
                    cv2.rectangle(frame, (int(xmin), int(ymin)),
                                  (int(xmax), int(ymax)), (0, 255, 255), 1)
                    # eye crop width/height in pixels

                    temp_img = copy_frame[int(ymin):int(ymax),
                                          int(xmin):int(xmax)]
                    # temp_img = cv2.resize(temp_img,(24,24))
                    crop_img.append(temp_img)
                if len(crop_img) < 2:
                    # tsave_path is never defined in this snippet; fall back to
                    # writing the frame straight to the output video.
                    out.write(frame)
                    continue
                # compose_img = np.hstack((crop_img[0],crop_img[1]))
            if landmarks is None:
                # no face detected: pass the frame through unchanged
                out.write(frame)
                continue
            result_buff = []
            score_buff = []
            for i in crop_img:
                i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)
                t1 = time.time()
                compose_img = Image.fromarray(i)
                img = data_trans(compose_img)
                img = img.unsqueeze(0)
                with torch.no_grad():
                    outputs = mixnet(img.to('cuda:0'))
                    spft_max = torch.nn.functional.softmax(outputs, dim=1)
                    # per-eye softmax scores for the three classes
                    score_buff.append(spft_max.cpu().numpy())
                    # torch.max returns (values, indices)
                    score, result = torch.max(spft_max, 1)
                    # result: index of the top class; score: its probability
                    result_buff.append([result.item(), score])
                run_time = time.time() - t1
                #0.005819
            bias = 30
            eye_bias = 100
            for i in range(2):
                t_result = result_buff[i][0]
                # width of the eye crop
                eye_w = eye_wild_buf[i]
                cv2.putText(frame,"w:{}".format(int(eye_w)),(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-50),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,255) \
                    ,thickness=2)
                if 0 == t_result:
                    # eye_class = "close_eye"
                    # cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(0,255,0) \
                    # ,thickness=2)
                    eye_class = "open_eye:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,255) \
                    ,thickness=2)
                elif 1 == t_result:
                    # eye_class = "open_eye"
                    # cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(255,0,255) \
                    # ,thickness=2)

                    eye_class = "close_eye:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,255,0) \
                    ,thickness=2)
                else:
                    eye_class = "other:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,0,255) \
                    ,thickness=2)
                # bias += 30
                eye_bias = 0
                # left_eye
                left_eye_open, left_eye_close, left_eye_other = score_buff[0][
                    0]
                cv2.putText(frame,"left_open:{:.2f}".format(left_eye_open) ,(10, 20),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_close:{:.2f}".format(left_eye_close) ,(10, 40),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_other:{:.2f}".format(left_eye_other) ,(10, 60),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)

                #right_eye
                right_eye_open, right_eye_close, right_eye_other = score_buff[
                    1][0]
                cv2.putText(frame,"left_open:{:.2f}".format(right_eye_open) ,(200, 20),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_close:{:.2f}".format(right_eye_close) ,(200, 40),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_other:{:.2f}".format(right_eye_other) ,(200, 60),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
            # keep the label/score pair with the higher probability
            max_id,max_score = (result_buff[0][0],result_buff[0][1].cpu().item()) if \
                result_buff[0][1].cpu().item()>result_buff[1][1].cpu().item() else (result_buff[1][0],result_buff[1][1].cpu().item())
            # debug info
            eye_wild_buf_info = "w:[{:.2f},{:.2f}]".format(
                eye_wild_buf[0], eye_wild_buf[1])
            # which eye box is the widest
            max_wilde_left_right = 0 if eye_wild_buf[0] > eye_wild_buf[1] else 1
            # get the id and score of the widest box,
            # then the id and score of the second-widest box
            max_wilde_id,max_wilde_score,max_wiled_second_id,max_wilde_second_score = (result_buff[0][0],result_buff[0][1].cpu().item(),result_buff[1][0],result_buff[1][1].cpu().item()) if \
                max_wilde_left_right==0 else (result_buff[1][0],result_buff[1][1].cpu().item(),result_buff[0][0],result_buff[0][1].cpu().item())

            score_buff_info = "score:[left: {:.2f}] [right: {:.2f}]".format(
                score_buff[0][0][2], score_buff[1][0][2])
            cv2.putText(frame,eye_wild_buf_info,(400,80),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,0) \
                ,thickness=2)
            cv2.putText(frame,score_buff_info,(400,100),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,0) \
            ,thickness=2)


            # if np.any(np.array(eye_wild_buf[:2])<19.0 )and max_score < 0.9 or np.any(np.array(eye_wild_buf[:2])<17.0 ) or np.any(np.array([score_buff[0][0][2],score_buff[1][0][2]])>= 0.5) and \
            #     max_score<0.9 or max_id==2:
            # widest-box checks: top-probability id == 2 or widest-box id == 2
            # if (eye_wild_buf[max_wilde_left_right]<17.0 ) or ((max_wilde_score>= 0.5) and \
            #     max_wilde_id==2 and max_wilde_second_score<0.85)  or max_id==2 and (max_wilde_score < 0.8 and max_wilde_id != 2) or (max_id==2 and max_wilde_id == 2 and(max_wilde_second_score<0.8) ) or \
            #         (max_wilde_id == 2 and max_wiled_second_id==2 and (max_wilde_second_score>0.5 or max_wilde_score>0.5)) or ( eye_wild_buf[ 0 if max_wilde_left_right else 1]<17.0 ) or \
            #             ((eye_wild_buf[ 0 if max_wilde_left_right else 1]>23 and max_wilde_second_score>0.8 and max_wilde_id==2) or \
            #                 (eye_wild_buf[max_wilde_left_right]>23 and max_wilde_score >0.8 and max_wiled_second_id==2)):
            # either eye wider than 23 px with score > 0.8 and id == 2
            # a box narrower than 17 px while the widest box scores < 0.8
            # an "other" probability above 0.5
            # any box narrower than 10 px is judged "other" outright


            if ((eye_wild_buf[0 if max_wilde_left_right else 1] > 23 and max_wilde_second_score > 0.8 and max_wiled_second_id == 2) or
                    (eye_wild_buf[max_wilde_left_right] > 23 and max_wilde_score > 0.8 and max_wilde_id == 2) or
                    (np.any(np.array(eye_wild_buf[:2]) < 17.0) and (max_wilde_score < 0.8)) or
                    ((max_wilde_id == 2 and max_wilde_score > 0.5 and max_wilde_second_score < 0.9) or
                     (max_wiled_second_id == 2 and max_wilde_second_score > 0.5 and max_wilde_score < 0.9)) or
                    (np.any(np.array(eye_wild_buf[:2]) < 10.0))):
                # a box under 19 px whose top eye score is < 0.9, or any box under
                # 12 px with max score < 0.9, or the top class is "other"
                # 2. any "other" score >= 0.5
                cv2.putText(frame,"other",(400,60),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,0,255) \
                ,thickness=2)
            # elif np.any(np.array([score_buff[0][0][1],score_buff[1][0][1]])>= 0.85)  \
            #      or (max_id==1 and max_score>0.750):
            elif (max_wilde_id==1 and max_wilde_score>=0.80)  \
                    or (max_id==1 and max_score>0.750):
                # elif (max_wilde_score >= 0.85) and max_wilde_id==1  \
                #      or (max_wilde_id==1 and max_wilde_score>0.750):
                # any closed-eye probability above 0.9, or the top class is
                # closed-eye with probability above 0.75
                cv2.putText(frame,"close",(400,60),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,255,0) \
                ,thickness=2)
            else:
                cv2.putText(frame,"open",(400,60),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,0) \
                ,thickness=2)

                # cv2.imshow("frame",frame)

            out.write(frame)
        else:

            print("finish")
            break
Example #11
                        for j in range(point_nums):
                            cv2.circle(frame, (int(landmarks_one[j, 0]),
                                               int(landmarks_one[j, 1])), 2,
                                       (255, 0, 0), -1)
                out.write(frame)
            else:
                break

        cap.release()
        out.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    point_nums = 24
    threshold = [0.6, 0.7, 0.7]  # [0.99, 0.1, 0.6]  #
    pnet, rnet, onet = create_mtcnn_net(
        p_model_path=r'model_store/final/pnet_epoch_19.pt',
        r_model_path=r'model_store/final/rnet_epoch_7.pt',
        o_model_path=r'model_store/final/onet_epoch_92.pt',
        use_cuda=True)

    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=threshold)
    videos_root_path = 'test_video/'
    save_path_root = 'result_video'
    detect_video(mtcnn_detector, videos_root_path, save_path_root)
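detect_video itself is not shown in this example; a hypothetical skeleton consistent with the call site, modeled on the video loops elsewhere on this page (all names beyond the call site are assumptions):

import os
import cv2

def detect_video(detector, video_dir, save_dir):
    # Run the detector on every video under video_dir and write an
    # annotated .avi copy of each into save_dir.
    os.makedirs(save_dir, exist_ok=True)
    for name in os.listdir(video_dir):
        cap = cv2.VideoCapture(os.path.join(video_dir, name))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter(
            os.path.join(save_dir, os.path.splitext(name)[0] + '.avi'),
            fourcc, fps, size)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            bboxs, landmarks, wearmask = detector.detect_face(frame, rgb=True)
            if bboxs is not None and bboxs.shape[0] > 0:
                for box in bboxs:
                    cv2.rectangle(frame, (int(box[0]), int(box[1])),
                                  (int(box[2]), int(box[3])), (0, 255, 255), 2)
            out.write(frame)
        cap.release()
        out.release()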
Example #12
import argparse
from mtcnn.core.detect import MtcnnDetector,create_mtcnn_net
import cv2
import time
from  mtcnn.config import *

pnet, rnet, onet = create_mtcnn_net(p_model_path=PNET_MODEL_PATH,r_model_path=RNET_MODEL_PATH,
                                 o_model_path=ONET_MODEL_PATH,use_cuda=False)
mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet, min_face_size=48)



cap = cv2.VideoCapture(0)
f = 0
fps = 0
stime = time.time()
while True:
    ret, frame = cap.read()  # read one frame
    frame = cv2.resize(frame, (480, 360))
    # frame = cv2.imread('face.jpg')
    boxes, boxes_align = mtcnn_detector.detect_pnet(im=frame)
    rboxes, rboxes_align = mtcnn_detector.detect_rnet(im=frame, dets=boxes_align)
    # oboxes,olandmark = mtcnn_detector.detect_onet(im=frame,dets=rboxes_align)
    if rboxes_align is not None:
        for box in rboxes_align:
            cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 0, 128), 2)
    f += 1
    if f % 20 == 0:
        fps = int(20 / (time.time() - stime))
        stime = time.time()
    cv2.putText(frame, '{:d}fps'.format(fps), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,(255, 0, 255), 2)
    cv2.imshow('Face Recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #13
    def test_Onet_without_PRnet(self, annotation, outputDir, test_moudel, xxyy,
                                savePic):
        imagedb = ImageDB(annotation)
        gt_imdb = imagedb.load_imdb()
        pnet, rnet, onet_jiang = create_mtcnn_net(
            p_model_path="./original_model/pnet_epoch.pt",
            r_model_path="./original_model/rnet_epoch.pt",
            o_model_path="./original_model/" + test_moudel + ".pt",
            use_cuda=False)
        mtcnn_detector = MtcnnDetector(pnet=pnet,
                                       rnet=rnet,
                                       onet=onet_jiang,
                                       min_face_size=24)

        test_data = TrainImageReader(gt_imdb,
                                     48,
                                     batch_size=100,
                                     shuffle=False)  # read one batch of data
        # train_data.reset()
        total_errors = 0

        cnt = 0
        for i, (images, (gt_labels, gt_bboxes,
                         gt_landmarks)) in enumerate(test_data):  # one batch at a time
            list_imgs = [images[i, :, :, :]
                         for i in range(images.shape[0])]  # 100 images per batch

            list_bboxes = [gt_bboxes[i, :] for i in range(gt_bboxes.shape[0])]
            list_gt_landmarks = [
                gt_landmarks[i, :] for i in range(gt_landmarks.shape[0])
            ]
            mix = list(zip(list_imgs, list_bboxes, list_gt_landmarks))
            batch_errors = []

            for img, gt_bbox, gt_landmark in mix:  # one image at a time
                if xxyy:
                    bboxs, landmarks = mtcnn_detector.detect_onet_xxyy(
                        img, gt_bbox)  # detect on the original image, xxyy box order
                else:
                    bboxs, landmarks = mtcnn_detector.detect_onet(
                        img, gt_bbox)  # detect on the original image

                if landmarks.size:
                    cnt += 1
                    bboxs = bboxs[:1]  # if several boxes were detected, keep the first
                    landmarks = landmarks[:1]
                    if savePic:
                        vis_face(img, bboxs, landmarks,
                                 self.output_dir + str(cnt) + ".jpg")  # save the image
                    gt_landmark = np.array(gt_landmark).reshape(5, 2)
                    landmarks = np.array(landmarks).reshape(5, 2)

                    normDist = np.linalg.norm(gt_landmark[1] -
                                              gt_landmark[0])  # inter-ocular distance
                    error = np.mean(
                        np.sqrt(np.sum(
                            (landmarks - gt_landmark)**2, axis=1))) / normDist

                    batch_errors.append(error)

            batch_errors = np.array(batch_errors).sum()
            total_errors += batch_errors
            print("%s:   %s pics mean error is %s" %
                  (datetime.datetime.now(), cnt, total_errors / cnt))
            if cnt > 999:
                print("%s:%s pics mean error is %s" %
                      (datetime.datetime.now(), cnt, total_errors / cnt))
                f = open("landmark_test.txt", "a+")
                f.write("%s, moudel_name:%s.pt, %s pics mean error is %s\n" %
                        (datetime.datetime.now(), test_moudel, cnt,
                         np.array(total_errors).reshape(1, -1).sum() / cnt))
                f.close()
                return

        print("%s:%s pics mean error is %s" %
              (datetime.datetime.now(), cnt, total_errors / cnt))
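The error metric above is the standard landmark NME: mean Euclidean distance over the five points, normalized by the inter-ocular distance. A self-contained sketch of the same computation:

import numpy as np

def landmark_nme(pred, gt):
    # pred, gt: (5, 2) arrays of landmark coordinates.
    # Normalize the mean point-to-point distance by the inter-ocular
    # distance (points 0 and 1 are the two eyes, as in the test above).
    inter_ocular = np.linalg.norm(gt[1] - gt[0])
    return np.mean(np.sqrt(np.sum((pred - gt) ** 2, axis=1))) / inter_ocular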
Example #14
import cv2
from mtcnn.core.detect import create_mtcnn_net, MtcnnDetector
from mtcnn.core.vision import vis_face

if __name__ == '__main__':
    pnet, rnet, onet = create_mtcnn_net(
        p_model_path="./original_model/pnet_epoch.pt",
        r_model_path="./original_model/rnet_epoch.pt",
        o_model_path="./original_model/onet_epoch.pt",
        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24)

    img = cv2.imread("./s_l.jpg")
    img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # b, g, r = cv2.split(img)
    # img2 = cv2.merge([r, g, b])

    bboxs, landmarks = mtcnn_detector.detect_face(img)
    # print box_align
    save_name = 'r_4.jpg'
    vis_face(img_bg, bboxs, landmarks, save_name)
Example #15
def gen_landmark48_data(data_dir,
                        anno_file,
                        pnet_model_file,
                        rnet_model_file,
                        prefix_path='',
                        use_cuda=True,
                        vis=False):

    anno_file = os.path.join(data_dir, anno_file)
    pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                     r_model_path=rnet_model_file,
                                     use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)

    imagedb = ImageDB(anno_file,
                      mode="test",
                      prefix_path=os.path.join(data_dir, 'img'))
    imdb = imagedb.load_imdb()
    image_reader = TestImageLoader(imdb, 1, False)

    all_boxes = list()
    batch_idx = 0

    for databatch in image_reader:
        if batch_idx % 500 == 0:
            print("%d images done" % batch_idx)
        im = databatch

        if im.shape[0] >= 1200 or im.shape[1] >= 1200:
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue

        t = time.time()

        p_boxes, p_boxes_align = mtcnn_detector.detect_pnet(im=im)

        boxes, boxes_align = mtcnn_detector.detect_rnet(im=im,
                                                        dets=p_boxes_align)

        if boxes_align is None:
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue
        if vis:
            rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
            vision.vis_two(rgb_im, boxes, boxes_align)

        t1 = time.time() - t
        t = time.time()
        all_boxes.append(boxes_align)
        batch_idx += 1

    save_path = config.MODEL_STORE_DIR

    if not os.path.exists(save_path):
        os.mkdir(save_path)

    save_file = os.path.join(save_path, "detections_celeba.pkl")
    with open(save_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
Example #16
import cv2
from mtcnn.core.detect import create_mtcnn_net, MtcnnDetector
from mtcnn.core.vision import vis_face

if __name__ == '__main__':
    #original model
    """ p_model_path = "./original_model/pnet_epoch.pt"
    r_model_path = "./original_model/rnet_epoch.pt"
    o_model_path = "./original_model/onet_epoch.pt" """
    #trained model
    p_model_path = "./original_model/pnet_epoch_train.pt"
    r_model_path = "./original_model/rnet_epoch_train.pt"
    o_model_path = "./original_model/onet_epoch_train.pt"
    pnet, rnet, onet = create_mtcnn_net(p_model_path=p_model_path,
                                        r_model_path=r_model_path,
                                        o_model_path=o_model_path,
                                        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=[0.6, 0.7, 0.7])

    img = cv2.imread("1.jpg")
    img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    bboxs, landmarks = mtcnn_detector.detect_face(img)
    # print box_align
    save_name = 'r_1.jpg'
    vis_face(img_bg, bboxs, landmarks, save_name)
Example #17
def dete_picture():

    eye_class_dict = {0: "open_eye", 1: "close_eye", 2: "other"}
    point_nums = 24
    threshold = [0.6, 0.7, 0.7]
    data_trans = Transforms.Compose([
        Transforms.Resize((24, 24)),
        Transforms.ToTensor(),
        Transforms.Normalize((0.45, 0.448, 0.455), (0.082, 0.082, 0.082)),
        # Transforms.Normalize((0.407, 0.405, 0.412), (0.087, 0.087, 0.087)),
    ])
    mixnet = MixNet(input_size=(24, 24), num_classes=3)
    # eye_class_dict = {0:"open_eye",1:"close_eye"}
    # weight_dict = torch.load("weight/signal_eye/Mixnet_epoch_29.pth")
    weight_dict = torch.load(
        "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/weight/relabel_mix_24_24_20210302/Mixnet_epoch_59.pth"
    )
    new_state_dict = OrderedDict()
    for k, v in weight_dict.items():
        name = k[7:]
        new_state_dict[name] = v

    mixnet.load_state_dict(new_state_dict)
    # stat(net,(3,48,48))
    mixnet.to('cuda:0')
    mixnet.eval()

    pnet, rnet, onet = create_mtcnn_net(
        p_model_path=r'model_store/final/pnet_epoch_19.pt',
        r_model_path=r'model_store/final/rnet_epoch_7.pt',
        o_model_path=r'model_store/final/onet_epoch_92.pt',
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=threshold)
    img_file = "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/test_video/caiji_0123"
    img_save = "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/result_video/relabel_img_result_adma_01"
    img_path = [
        os.path.join(img_file, file_name)
        for file_name in glob.glob(os.path.join(img_file, "*.jpg"))
    ]

    # videos_root_path = 'test_video/DMS_RAW_Nebula_20201201-143038_518.mp4'
    # save_path_root = 'result_video/24_24_DMS_RAW_Nebula_20201201-143038_518.avi'

    # cap = cv2.VideoCapture(videos_root_path)
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # fps = cap.get(cv2.CAP_PROP_FPS)
    # size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # tpa
    # fname = os.path.splitext(os.path.split(tpa)[1])[0]
    # save_path = os.path.join("/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/result_video/data(2)",fname+".avi")
    # out = cv2.VideoWriter(save_path_root, fourcc, fps, size)
    for img_p in tqdm(img_path):
        frame = cv2.imread(img_p)

        copy_frame = frame.copy()
        left_right_eye = []
        bboxs, landmarks, wearmask = mtcnn_detector.detect_face(frame,
                                                                rgb=True)

        if landmarks is not None:
            for i in range(landmarks.shape[0]):
                landmarks_one = landmarks[i, :]
                landmarks_one = landmarks_one.reshape((point_nums, 2))
                left_eye = np.array(landmarks_one[[6, 8, 10, 11, 14], :])
                xmin = np.min(left_eye[:, 0])
                ymin = np.min(left_eye[:, 1])
                xmax = np.max(left_eye[:, 0])
                ymax = np.max(left_eye[:, 1])
                left_right_eye.append([xmin, ymin, xmax, ymax])
                # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)

                right_eye = np.array(landmarks_one[[7, 9, 12, 13, 15], :])
                xmin = np.min(right_eye[:, 0])
                ymin = np.min(right_eye[:, 1])
                xmax = np.max(right_eye[:, 0])
                ymax = np.max(right_eye[:, 1])
                left_right_eye.append([xmin, ymin, xmax, ymax])
                # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)
                for j in [*left_eye, *right_eye]:
                    cv2.circle(frame, (int(j[0]), int(j[1])), 2, (255, 0, 0),
                               -1)

            crop_img = []
            for xmin, ymin, xmax, ymax in left_right_eye:
                w, h = xmax - xmin, ymax - ymin
                # expand the eye box (training used a random 0.05-0.15 margin; k is fixed here)
                k = 0.1
                ratio = h / w
                if ratio > 1:
                    ratio = ratio - 1
                    xmin -= (ratio / 2 * w + k * h)
                    ymin -= (k * h)
                    xmax += (ratio / 2 * w + k * h)
                    ymax += (k * h)

                else:
                    ratio = w / h - 1
                    xmin -= (k * w)
                    ymin -= (ratio / 2 * h + k * w)
                    xmax += (k * w)
                    ymax += (ratio / 2 * h + k * w)
                cv2.rectangle(frame, (int(xmin), int(ymin)),
                              (int(xmax), int(ymax)), (0, 255, 255), 2)
                temp_img = copy_frame[int(ymin):int(ymax), int(xmin):int(xmax)]
                # temp_img = cv2.resize(temp_img,(24,24))
                crop_img.append(temp_img)
            if len(crop_img) < 2:
                img_name = os.path.split(img_p)[-1]
                cv2.imwrite(os.path.join(img_save, img_name), frame)
                # out.write(frame)
                continue
            # compose_img = np.hstack((crop_img[0],crop_img[1]))
            result_buff = []
            score_buff = []
            for i in crop_img:
                i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)

                compose_img = Image.fromarray(i)
                img = data_trans(compose_img)
                img = img.unsqueeze(0)
                with torch.no_grad():
                    outputs = mixnet(img.to('cuda:0'))
                    spft_max = torch.nn.functional.softmax(outputs, dim=1)
                    score_buff.append(spft_max.cpu().numpy())
                    # torch.max returns (values, indices)
                    score, result = torch.max(spft_max, 1)
                    result_buff.append([result.item(), score])
            bias = 30
            eye_bias = 100
            for i in range(2):
                t_result = result_buff[i][0]
                if 0 == t_result:
                    # eye_class = "close_eye"
                    # cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(0,255,0) \
                    # ,thickness=2)
                    eye_class = "open_eye:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,0,255) \
                    ,thickness=2)
                elif 1 == t_result:
                    # eye_class = "open_eye"
                    # cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(255,0,255) \
                    # ,thickness=2)

                    eye_class = "close_eye:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,255,0) \
                    ,thickness=2)
                else:
                    eye_class = "other:{:.2f}".format(
                        result_buff[i][1].cpu().item())
                    cv2.putText(frame,eye_class,(int(left_right_eye[i][0])-eye_bias, int(left_right_eye[i][1])-bias),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,0,255) \
                    ,thickness=2)
                # bias += 30
                eye_bias = 0
                # left_eye
                left_eye_open, left_eye_close, left_eye_other = score_buff[0][
                    0]
                cv2.putText(frame,"left_open:{:.2f}".format(left_eye_open) ,(10, 20),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_close:{:.2f}".format(left_eye_close) ,(10, 40),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_other:{:.2f}".format(left_eye_other) ,(10, 60),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)

                #right_eye
                right_eye_open, right_eye_close, right_eye_other = score_buff[
                    1][0]
                cv2.putText(frame,"left_open:{:.2f}".format(right_eye_open) ,(200, 20),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_close:{:.2f}".format(right_eye_close) ,(200, 40),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
                cv2.putText(frame,"left_other:{:.2f}".format(right_eye_other) ,(200, 60),cv2.FONT_HERSHEY_COMPLEX,0.6,(20,150,0) \
                    ,thickness=2)
            # eye_class = "open_eye" if 0 in t_result else "close_eye"
        img_name = os.path.split(img_p)[-1]
        cv2.imwrite(os.path.join(img_save, img_name), frame)
Example #18
def show_with_camera():

    eye_class_dict = {0: "open_eye", 1: "close_eye", 2: "other"}
    point_nums = 24
    threshold = [0.6, 0.7, 0.7]
    data_trans = Transforms.Compose([
        Transforms.Resize((24, 24)),
        Transforms.ToTensor(),
        Transforms.Normalize((0.45, 0.448, 0.455), (0.082, 0.082, 0.082)),
        # Transforms.Normalize((0.407, 0.405, 0.412), (0.087, 0.087, 0.087)),
    ])
    mixnet = MixNet(input_size=(24, 24), num_classes=3)
    # eye_class_dict = {0:"open_eye",1:"close_eye"}
    # weight_dict = torch.load("weight/signal_eye/Mixnet_epoch_29.pth")
    weight_dict = torch.load(
        "/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/weight/mix_mbhk_change_signal_eye_24_24/Mixnet_epoch_59.pth"
    )
    new_state_dict = OrderedDict()
    for k, v in weight_dict.items():
        name = k[7:]
        new_state_dict[name] = v

    mixnet.load_state_dict(new_state_dict)
    # stat(net,(3,48,48))
    mixnet.to('cuda:0')
    mixnet.eval()

    pnet, rnet, onet = create_mtcnn_net(
        p_model_path=r'model_store/final/pnet_epoch_19.pt',
        r_model_path=r'model_store/final/rnet_epoch_7.pt',
        o_model_path=r'model_store/final/onet_epoch_92.pt',
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=threshold)
    videos_root_path = 'test_video/20200506143954001_0.avi'
    save_path_root = 'result_video/camera_test_20210301.avi'

    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # tpa
    # fname = os.path.splitext(os.path.split(tpa)[1])[0]
    # save_path = os.path.join("/media/omnisky/D4T/JSH/faceFenlei/Projects/hul_eye_class/result_video/data(2)",fname+".avi")
    out = cv2.VideoWriter(save_path_root, fourcc, fps, size)
    while True:
        ret, frame = cap.read()

        if ret:
            copy_frame = frame.copy()
            left_right_eye = []
            bboxs, landmarks, wearmask = mtcnn_detector.detect_face(frame,
                                                                    rgb=True)

            if landmarks is not None:
                for i in range(landmarks.shape[0]):
                    landmarks_one = landmarks[i, :]
                    landmarks_one = landmarks_one.reshape((point_nums, 2))
                    left_eye = np.array(landmarks_one[[6, 8, 10, 11, 14], :])
                    xmin = np.min(left_eye[:, 0])
                    ymin = np.min(left_eye[:, 1])
                    xmax = np.max(left_eye[:, 0])
                    ymax = np.max(left_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])
                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)

                    right_eye = np.array(landmarks_one[[7, 9, 12, 13, 15], :])
                    xmin = np.min(right_eye[:, 0])
                    ymin = np.min(right_eye[:, 1])
                    xmax = np.max(right_eye[:, 0])
                    ymax = np.max(right_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])
                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)
                    for j in [*left_eye, *right_eye]:
                        cv2.circle(frame, (int(j[0]), int(j[1])), 2,
                                   (255, 0, 0), -1)

                crop_img = []
                for xmin, ymin, xmax, ymax in left_right_eye:
                    w, h = xmax - xmin, ymax - ymin
                    # expand the eye box (training used a random 0.05-0.15 margin; k is fixed here)
                    k = 0.1
                    ratio = h / w
                    if ratio > 1:
                        ratio = ratio - 1
                        xmin -= (ratio / 2 * w + k * h)
                        ymin -= (k * h)
                        xmax += (ratio / 2 * w + k * h)
                        ymax += (k * h)

                    else:
                        ratio = w / h - 1
                        xmin -= (k * w)
                        ymin -= (ratio / 2 * h + k * w)
                        xmax += (k * w)
                        ymax += (ratio / 2 * h + k * w)
                    cv2.rectangle(frame, (int(xmin), int(ymin)),
                                  (int(xmax), int(ymax)), (0, 255, 255), 2)
                    temp_img = copy_frame[int(ymin):int(ymax),
                                          int(xmin):int(xmax)]
                    # temp_img = cv2.resize(temp_img,(24,24))
                    crop_img.append(temp_img)
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
                if len(crop_img) < 2:
                    cv2.imshow("test", frame)
                    tget_in = cv2.waitKey(10)
                    # print(ord('q'),tget_in)
                    if tget_in == ord('q'):
                        print("get out")
                        break
                    out.write(frame)
                    continue
                # compose_img = np.hstack((crop_img[0],crop_img[1]))
                t_result = []
                for i in crop_img:
                    i = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
                    i = cv2.cvtColor(i, cv2.COLOR_GRAY2RGB)

                    compose_img = Image.fromarray(i)
                    img = data_trans(compose_img)
                    img = img.unsqueeze(0)
                    with torch.no_grad():
                        outputs = mixnet(img.to('cuda:0'))
                        result = torch.max(outputs, 1)[1]
                        t_result.append(result.item())
                if 0 in t_result:
                    eye_class = "open_eye"
                    cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(255,0,255) \
                    ,thickness=2)
                elif 1 in t_result:
                    eye_class = "close_eye"
                    cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(0,255,0) \
                    ,thickness=2)
                else:
                    eye_class = "other"
                    cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(0,0,255) \
                    ,thickness=2)
                cv2.imshow("test", frame)
                tget_in = cv2.waitKey(10)
                if tget_in == ord('q'):
                    print("get out")
                    break
                # eye_class = "open_eye" if 0 in t_result else "close_eye"

                # cv2.putText(frame,eye_class,(int(xmax), int(ymax)-20),cv2.FONT_HERSHEY_COMPLEX,1.0,(255,0,255) \
                #     if 0 in t_result else (255,255,0),thickness=2)
            out.write(frame)
        else:
            print("finish")
            break
Example #19
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import transforms as transform
from mtcnn.core.detect import create_mtcnn_net, MtcnnDetector
from mtcnn.core.vision import vis_face
from models import vgg_prune
#from models import resnet_prune

use_cuda = False
cut_size = 46
fps1 = 0.0
fps2 = 0.0
v = 0.0000000001

pnet, rnet, onet = create_mtcnn_net(p_model_path="mtcnn_models/pnet.pt", r_model_path="mtcnn_models/rnet.pt", o_model_path="mtcnn_models/onet.pt", use_cuda=use_cuda)
mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet, min_face_size = 48, stride=2, threshold=[0.66, 0.7, 0.7], scale_factor=0.709)

#class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
class_names = ['just so so', 'just so so', 'just so so', 'good', 'common', 'just so so', 'common']

transform_test = transform.Compose([
    transform.TenCrop(cut_size),
    transform.Lambda(lambda crops: torch.stack([transform.ToTensor()(crop) for crop in crops])),
])

print('==>  ori_model ...')
net = vgg_prune.VGG()
checkpoint = torch.load('ori_models/fer_Pri_vgg16.pth')
net.load_state_dict(checkpoint['state_dict'])
#print('==>  ori_model ...')
Example #20
def main():
    eye_class_dict = {0: "open_eye", 1: "close_eye", 2: "other"}
    point_nums = 24
    threshold = [0.6, 0.7, 0.7]
    data_trans = Transforms.Compose([
        # Transforms.Resize((24, 48)),
        Transforms.ToTensor(),
        Transforms.Normalize((0.407, 0.405, 0.412), (0.087, 0.087, 0.087)),
    ])
    mixnet = MixNet(input_size=(24, 48), num_classes=3)
    weight_dict = torch.load("weight/change_mix_data_0202/Mixnet_epoch_59.pth")
    new_state_dict = OrderedDict()
    for k, v in weight_dict.items():
        name = k[7:]
        new_state_dict[name] = v

    mixnet.load_state_dict(new_state_dict)
    # stat(net,(3,48,48))
    mixnet.to('cuda:0')
    mixnet.eval()

    pnet, rnet, onet = create_mtcnn_net(
        p_model_path=r'model_store/final/pnet_epoch_19.pt',
        r_model_path=r'model_store/final/rnet_epoch_7.pt',
        o_model_path=r'model_store/final/onet_epoch_92.pt',
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24,
                                   threshold=threshold)
    videos_root_path = 'test_video/20200522164730261_0.avi'
    save_path_root = 'result_video/20200522164730261_0.avi'

    cap = cv2.VideoCapture(videos_root_path)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    out = cv2.VideoWriter(save_path_root, fourcc, fps, size)
    while True:
        ret, frame = cap.read()

        if ret:
            copy_frame = frame.copy()
            left_right_eye = []
            bboxs, landmarks, wearmask = mtcnn_detector.detect_face(frame,
                                                                    rgb=True)

            if landmarks is not None:
                for i in range(landmarks.shape[0]):
                    landmarks_one = landmarks[i, :]
                    landmarks_one = landmarks_one.reshape((point_nums, 2))
                    left_eye = np.array(landmarks_one[[6, 8, 10, 11, 14], :])
                    xmin = np.min(left_eye[:, 0])
                    ymin = np.min(left_eye[:, 1])
                    xmax = np.max(left_eye[:, 0])
                    ymax = np.max(left_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])
                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)

                    right_eye = np.array(landmarks_one[[7, 9, 12, 13, 15], :])
                    xmin = np.min(right_eye[:, 0])
                    ymin = np.min(right_eye[:, 1])
                    xmax = np.max(right_eye[:, 0])
                    ymax = np.max(right_eye[:, 1])
                    left_right_eye.append([xmin, ymin, xmax, ymax])
                    # cv2.rectangle(frame,(int(xmin),int(ymin)),(int(xmax),int(ymax)),(0,255,0),2)
                    for j in [*left_eye, *right_eye]:
                        cv2.circle(frame, (int(j[0]), int(j[1])), 2,
                                   (255, 0, 0), -1)

                crop_img = []
                for xmin, ymin, xmax, ymax in left_right_eye:
                    w, h = xmax - xmin, ymax - ymin
                    # expand the eye box (training used a random 0.05-0.15 margin; k is fixed here)
                    k = 0.1
                    ratio = h / w
                    if ratio > 1:
                        ratio = ratio - 1
                        xmin -= (ratio / 2 * w + k * h)
                        ymin -= (k * h)
                        xmax += (ratio / 2 * w + k * h)
                        ymax += (k * h)

                    else:
                        ratio = w / h - 1
                        xmin -= (k * w)
                        ymin -= (ratio / 2 * h + k * w)
                        xmax += (k * w)
                        ymax += (ratio / 2 * h + k * w)
                    cv2.rectangle(frame, (int(xmin), int(ymin)),
                                  (int(xmax), int(ymax)), (0, 255, 255), 2)
                    temp_img = copy_frame[int(ymin):int(ymax),
                                          int(xmin):int(xmax)]
                    temp_img = cv2.resize(temp_img, (24, 24))
                    crop_img.append(temp_img)
                if len(crop_img) < 2:
                    out.write(frame)
                    continue
                compose_img = np.hstack((crop_img[0], crop_img[1]))
                compose_img = cv2.cvtColor(compose_img, cv2.COLOR_BGR2RGB)

                compose_img = Image.fromarray(compose_img)
                img = data_trans(compose_img)
                img = img.unsqueeze(0)
                with torch.no_grad():
                    outputs = mixnet(img.to('cuda:0'))
                    result = torch.max(outputs, 1)[1]
                    eye_class = eye_class_dict[result.item()]
                cv2.putText(frame,eye_class,(0,20),cv2.FONT_HERSHEY_COMPLEX,1.3,(255,0,255) \
                    if result.item() == 0 else (255,255,0),thickness=2)
            out.write(frame)
        else:
            print("finish")
            break
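Note the design difference from the per-eye variants above: here the two 24x24 eye crops are hstack-ed into a single 24x48 image, matching MixNet(input_size=(24, 48)), so one forward pass classifies both eyes jointly instead of scoring each eye separately.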