Example #1
# These snippets follow the module layout of the dougsouza/face-frontalization
# repo; the imports behind them would typically be:
#   import cv2, dlib, os
#   import numpy as np
#   import scipy.io as io
#   import frontalize
#   import facial_feature_detector as feature_detection
#   import camera_calibration as calib
#   import check_resources as check
def get_frontal_image(image):

    predictor_path = 'dlib_models/shape_predictor_68_face_landmarks.dat'
    model3D_path = 'frontalization_models/model3Ddlib.mat'
    eyemask_path = 'frontalization_models/eyemask.mat'

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    img = cv2.imread(image, 1)
    height, width, layers = img.shape
    new_h = round(height / 3)
    new_w = round(width / 3)
    img = cv2.resize(img, (new_w, new_h))

    lmarks = feature_detection.get_landmarks(img, detector, predictor)
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    cv2.imwrite('frontal_image.png', frontal_sym)
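A minimal usage sketch for the function above, assuming the model files are in place and a hypothetical photo.jpg contains one detectable face:

# Hypothetical call: loads photo.jpg, frontalizes the first detected face,
# and writes the soft-symmetry result to frontal_image.png.
get_frontal_image('photo.jpg')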
Example #2
def demo():
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    img = cv2.imread("test.jpg", 1)
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry (note: this path is resolved relative to the CWD)
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
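frontalize.frontalize returns float arrays at the reference-model resolution, so saving them to disk needs a cast first; a short sketch, mirroring what Examples 1 and 9 do:

# Cast the frontalized output to uint8 before writing it out:
cv2.imwrite('frontal_sym.png', frontal_sym.astype('uint8'))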
Example #3
 def __init__(self, args):
     self.args = args
     check.check_dlib_landmark_weights(args)
     self.detector = dlib.get_frontal_face_detector()
     self.model3D = front.ThreeD_Model(
         args.resource_dir + '/Image/model3Ddlib.mat', 'model_dlib')
     self.eyemask = np.asarray(
         io.loadmat(args.resource_dir + '/Image/eyemask.mat')['eyemask'])
Example #4
 def __init__(self):
     this_path = os.path.dirname(os.path.abspath(__file__))
     check.check_dlib_landmark_weights()
     self.model3D = frontalize.ThreeD_Model(
         this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
     self.eyemask = np.asarray(
         io.loadmat(this_path +
                    '/frontalization_models/eyemask.mat')['eyemask'])
Example #5
def img_normalization(img, lmark):
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

    height, width, _ = img.shape

    #face_lu = np.array([min(lmark[:,0]), min(lmark[:,1])])
    #face_rd = np.array([max(lmark[:,0]), max(lmark[:,1])])
    #face_lu = face_lu - 20
    #face_rd = face_rd + 20

    #if face_lu[0]<0:
    #    face_lu[0] = 0
    #if face_lu[1]<0:
    #    face_lu[1] = 0
    #if face_rd[0] > width:
    #    face_rd[0] = width
    #if face_rd[1] > height:
    #    face_rd[1] = height
    #face_lu = face_lu.astype('uint64')
    #face_rd = face_rd.astype('uint64')

    #new_img = img[face_lu[1]:face_rd[1], face_lu[0]:face_rd[0]]
    #new_img = cv2.resize(img, (320, 320))

    #lmark = lmark - [face_lu[0], face_lu[1]]
    #w_res = 320/(face_rd[0]-face_lu[0])
    #h_res = 320/(face_rd[1]-face_lu[1])
    #lmark = lmark*[w_res, h_res]
    #new_lm = copy.copy(lmark)

    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmark)
    frontal_raw, frontal_sym, proj_map = frontalize.frontalize(
        img, proj_matrix, model3D.ref_U, eyemask)

    rot_lmark = np.zeros((lmark.shape[0], 2))
    for i in range(lmark.shape[0]):
        rot_lmark[i][1], rot_lmark[i][0] = find_nearest(proj_map, lmark[i])

    reye_ul, reye_dr = lmark_bbox(rot_lmark[36:42])
    leye_ul, leye_dr = lmark_bbox(rot_lmark[42:48])
    mouth_ul, mouth_dr = lmark_bbox(rot_lmark[48:68], pos='mouth')

    int_frontal = frontal_sym.astype('uint8')
    reye = int_frontal[reye_ul[0]:reye_dr[0], reye_ul[1]:reye_dr[1]]
    leye = int_frontal[leye_ul[0]:leye_dr[0], leye_ul[1]:leye_dr[1]]
    mouth = int_frontal[mouth_ul[0]:mouth_dr[0], mouth_ul[1]:mouth_dr[1]]

    frontal_lmark = np.copy(int_frontal)
    for i in range(len(rot_lmark)):
        cv2.circle(frontal_lmark, (int(rot_lmark[i][0]), int(rot_lmark[i][1])),
                   2, (0, 0, 255),
                   thickness=-1)

    return int_frontal, reye, leye, mouth, frontal_lmark
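find_nearest and lmark_bbox are project-local helpers that the snippet does not include. A plausible minimal lmark_bbox, assuming it returns (upper-left, lower-right) corners in (row, col) order with a little padding, might look like this:

# Hypothetical stand-in for the project's lmark_bbox helper: the bounding box
# of a landmark subset, returned as (row, col) corners for image indexing.
# pos is accepted (the caller passes pos='mouth') but ignored in this sketch,
# and no bounds clamping is done.
def lmark_bbox(points, pos=None, pad=5):
    cols, rows = points[:, 0], points[:, 1]
    ul = (int(rows.min()) - pad, int(cols.min()) - pad)
    dr = (int(rows.max()) + pad, int(cols.max()) + pad)
    return ul, dr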
Example #6
def demo():
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    # img2 = cv2.imread("test.jpg", 1)
    img = cv2.imread(
        "/home/jordan/PycharmProjects/Emotion_Analysis/Facial_Expression_recog/data/test_photo.jpg",
        1)
    # cv2.resize takes (width, height), i.e. (shape[1], shape[0])
    img = cv2.resize(img, (int(img.shape[1] / 3), int(img.shape[0] / 3)))
    # detected_object = mouthdetection.findmouth(img, haar_face, haar_mouth)[0]
    # img = smile_detection.crop(detected_object, img, extended = True)
    cv2.imshow("", img)
    cv2.waitKey(5000)
    img = img.astype(np.uint8)
    # print img.shape
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
Example #7
def demo(path):

    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    img = cv2.imread(path, 1)

    lmarks = feature_detection.get_landmarks(img)

    if (len(lmarks) > 0):
        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            model3D, lmarks[0])

        eyemask = np.asarray(
            io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

        frontal_raw, frontal_sym = frontalize.frontalize(
            img, proj_matrix, model3D.ref_U, eyemask)
        # note: this overwrites the input image at `path` with the frontalized result
        cv2.imwrite(path, frontal_raw)
Example #8
def process():
    global vs, outputFrame, lock, registeredFace

    imgArray, labels = face_recognition_HAAR.readImageOnly(
        "./Database_aligned_3D")
    names = list(set(labels))
    names.sort()

    print("Available persons in database:" + str(names))
    labelNumPersonDict = {name: i for i, name in enumerate(names)}
    labelNumPerson = [labelNumPersonDict.get(n, n) for n in labels]

    model3D = frontalize.ThreeD_Model(
        "./frontalization_models/model3Ddlib.mat", 'model_dlib')
    model3D.ref_U = imutils.resize(model3D.ref_U[52:250, 91:225, :], height=60)

    eyeMask = np.asarray(
        sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    eyeMask = imutils.resize(eyeMask[52:250, 91:225], height=60)

    model3D.out_A = np.asmatrix(np.array(
        [[0.5 * 506.696672, 0, 0.5 * 324.202],
         [0, 0.5 * 506.3752, 0.5 * 245.7785096], [0, 0, 1]]),
                                dtype='float32')
    model3D.distCoeff = None

    print("Training lbph recognizer")
    recognizer = cv2.face.LBPHFaceRecognizer_create(1, 8, 8, 8)
    recognizer.train(imgArray, np.array(labelNumPerson))
    print("Training finished, begin streaming...")
    while (True):

        frame = vs.read()
        image, front = face_recognition_HAAR.performFaceRecognitionWithFrontalisationV2(
            frame, recognizer, model3D, eyeMask, labelNumPersonDict)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        with lock:
            outputFrame = np.copy(image)
            registeredFace = np.copy(front)
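process() hands each frame to performFaceRecognitionWithFrontalisationV2; for reference, here is a hedged sketch of querying the trained LBPH recognizer directly with a single frontalized grayscale crop (gray_face is a hypothetical input shaped like the training images):

# Hypothetical single-face query; predict returns (label_id, distance),
# where a lower distance means a closer match.
label_id, distance = recognizer.predict(gray_face)
print('best match:', names[label_id], 'distance:', distance)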
Example #9
def demo(image):
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    img = cv2.resize(image, (250, 250), interpolation=cv2.INTER_LINEAR)

    lmarks, f = feature_detection.get_landmarks(img)

    if not f:
        return False, 1

    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])

    print(proj_matrix)

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization

    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    '''
    plt.figure()
    plt.title('Image frontalized(Before symmetry)')
    plt.imshow(frontal_raw[:,:,::-1].astype('uint8'))
    plt.figure()
    plt.title('Image frontalized(After symmetry)')
    plt.imshow(frontal_sym[:,:,::-1].astype('uint8'))
    '''
    x, y, z = grammer.get_angle(rmat)
    print('Rotation angles: x: {}, y: {}, z: {}'.format(x, y, z))  # rough estimate of the rotation angles
    image_output = frontal_sym.astype('uint8')
    return True, image_output
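grammer.get_angle is project-specific; below is a minimal sketch of recovering Euler angles in degrees from the 3x3 rotation matrix rmat, assuming the common x-y-z (roll-pitch-yaw) convention and a non-degenerate pose:

import numpy as np

# Standard rotation-matrix-to-Euler decomposition (x-y-z convention); a
# stand-in for grammer.get_angle, whose convention may differ.
def rotation_to_euler_degrees(rmat):
    sy = np.sqrt(rmat[0, 0] ** 2 + rmat[1, 0] ** 2)
    x = np.arctan2(rmat[2, 1], rmat[2, 2])  # roll
    y = np.arctan2(-rmat[2, 0], sy)         # pitch
    z = np.arctan2(rmat[1, 0], rmat[0, 0])  # yaw
    return np.degrees([x, y, z])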
Example #10
def load_face_frontal_elements(path_to_face_front_utilyties):
    '''
    Routine to prepare elements for the face-frontalization module by dougsouza:
    https://github.com/dougsouza/face-frontalization
    Tal Hassner, Shai Harel, Eran Paz and Roee Enbar, Effective Face Frontalization in Unconstrained Images,
    IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), Boston, June 2015
    '''

    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()

    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        path_to_face_front_utilyties +
        '/frontalization_models/model3Ddlib.mat', 'model_dlib')

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat(path_to_face_front_utilyties +
                   '/frontalization_models/eyemask.mat')['eyemask'])

    return model3D, eyemask
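A usage sketch tying the loader above into the rest of the pipeline (the '.' base path and the hypothetical query.jpg are assumptions):

# Hypothetical end-to-end use of the loaded elements:
model3D, eyemask = load_face_frontal_elements('.')
img = cv2.imread('query.jpg', 1)
lmarks = feature_detection.get_landmarks(img)
proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(model3D, lmarks[0])
frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                 model3D.ref_U, eyemask)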
Example #11
def get_video_points(
        video_file,
        predictor_path='dlib_models/shape_predictor_68_face_landmarks.dat',
        model3D_path='frontalization_models/model3Ddlib.mat',
        eyemask_path='frontalization_models/eyemask.mat',
        mode='face',
        resize=False):

    time_list = []
    points_list = []
    points_list_sym = []
    points_list_raw = []

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    cap = cv2.VideoCapture(video_file)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print('fps=', fps)

    # width = 320
    # height = 320

    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # video_sym = cv2.VideoWriter('video_sym.avi', fourcc, fps, (width, height))
    # video_raw = cv2.VideoWriter('video_raw.avi', fourcc, fps, (width, height))

    while True:
        try:

            ret, frame = cap.read()
            if not ret:
                print("no ret")
                break

            msec = cap.get(cv2.CAP_PROP_POS_MSEC)
            print(msec / 1000)

            img = frame
            iheight, iwidth, layers = img.shape

            #resize
            if resize:
                new_h = round(iheight / 3)
                new_w = round(iwidth / 3)
                img = cv2.resize(img, (new_w, new_h))

            lmarks = feature_detection.get_landmarks(img, detector, predictor)

            if len(lmarks) != 1:
                continue

            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            frontal_raw, frontal_sym = frontalize.frontalize(
                img, proj_matrix, model3D.ref_U, eyemask)

            time_list.append(msec)

            new_img_sym = np.round(frontal_sym).astype(np.uint8)
            new_img_raw = np.round(frontal_raw).astype(np.uint8)
            lmarks_sym = feature_detection.get_landmarks(
                new_img_sym, detector, predictor)
            temp_pts = [[x, y] for x, y in lmarks_sym[0]]

            if mode == 'mouth':
                temp_pts = temp_pts[33:34] + temp_pts[48:]

            points_list_sym.append(temp_pts)

            # video_sym.write(new_img_sym)
            # video_raw.write(new_img_raw)

        except ValueError as e:
            print(e)
            break

    cap.release()
    points_list = list(zip(time_list, points_list_sym))

    with shelve.open(
            'extracted_data/' + video_file.split('/')[-1].split('.')[0] +
            '.txt', 'c') as db:
        for i, x in enumerate(points_list):
            db[str(i)] = x

    return points_list
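The shelve database written above can be read back under the same stringified integer keys; a short sketch, assuming a hypothetical input video clip.avi was processed:

import shelve

# get_video_points shelves (msec, landmark_points) pairs; read them back in order:
with shelve.open('extracted_data/clip.txt', 'r') as db:
    for key in sorted(db.keys(), key=int):
        msec, points = db[key]
        print(msec / 1000, len(points))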
Example #12
def myfrontalize(X, limit=0):
    count = 0

    if (limit == 0):
        limit = X.shape[0]

    print("Total Images: ", limit)

    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()

    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

    for i in range(0, limit):
        print("\r", end='')
        print("Images Completed: {0}".format(i), end='', flush=True)

        # cast img to type int for cv2
        img = X[i, :, :, 0].astype(np.uint8)

        # create a color version for frontalizer stuffs
        c_img = np.copy(img)
        c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)

        # extract landmarks from the query image
        # list containing a 2D array with points (x, y) for each face detected in the query image
        lmarks = feature_detection.get_landmarks(c_img)
        if isinstance(lmarks, np.ndarray):

            # perform camera calibration according to the first face detected
            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            # perform frontalization and convert result to grayscale
            frontal_raw, frontal_sym = frontalize.frontalize(
                c_img, proj_matrix, model3D.ref_U, eyemask)
            temp = cv2.cvtColor(frontal_raw, cv2.COLOR_BGR2GRAY)

            # find the nonzero bounding box and crop to remove unnecessary black space from the edges
            temp_mask = cv2.findNonZero(temp)
            t_x, t_y, t_w, t_h = cv2.boundingRect(temp_mask)
            t_bbox = temp[t_y:t_y + t_h, t_x:t_x + t_w]

            # resize the cropped image to the appropriate dimensions for network
            t_bbox = cv2.resize(t_bbox, dsize=(48, 48))
            t_bbox = np.resize(t_bbox, (48, 48, 1))
            X[i] = t_bbox.astype(np.float32)

            count += 1

    print()
    print('{} images out of {} were frontalized.'.format(count, limit))
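myfrontalize rewrites X in place, leaving images with no detectable face untouched; a hedged usage sketch on a FER-style batch of 48x48 grayscale images (the .npy file is an assumption):

# Hypothetical batch of N grayscale faces with shape (N, 48, 48, 1), float32:
X = np.load('fer_faces.npy')
myfrontalize(X, limit=100)  # frontalize the first 100 images in place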
Example #13
def readVideosAndLabels(directory):
    namesUnique = os.listdir(directory)
    labels = []
    faceMatrix = None
    print(namesUnique)

    # maps each of the 68 dlib landmarks (1-indexed) to its horizontal mirror
    indiceToSymmetry = [
        17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 27, 26, 25,
        24, 23, 22, 21, 20, 19, 18, 28, 29, 30, 31, 36, 35, 34, 33, 32, 46, 45,
        44, 43, 48, 47, 40, 39, 38, 37, 42, 41, 55, 54, 53, 52, 51, 50, 49, 60,
        59, 58, 57, 56, 65, 64, 63, 62, 61, 68, 67, 66
    ]
    indiceToSymmetry = list(np.array(indiceToSymmetry) - 1)

    model3D = frontalize.ThreeD_Model(
        "./frontalization_models/model3Ddlib.mat", 'model_dlib')

    eyemask = np.asarray(
        sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    for ind, folder in enumerate(namesUnique):
        for path in os.listdir(os.path.join(directory, folder)):

            path = os.path.join(directory, folder, path)
            cap = cv2.VideoCapture(path)
            count = 0
            trackingLost = True
            print(folder)
            """
            if(folder!="Romuald"):
                print("a")
                
                if(folder=="Stephane"):
                    pass
                else:
                   break
            """
            if (folder != "Florian"):
                continue
            while (True):

                ret, imageO = cap.read()

                if (count % 2 == 1):

                    if (not ret):
                        break
                    #print(imageO.shape)
                    #image = cv2.resize(image,(700,700))
                    if (folder == "Faddwzi"):
                        #image=imutils.rotate_bound(image, -90)
                        break
                        #pass
                    else:
                        #image=imutils.rotate_bound(image, 90)
                        image = np.ascontiguousarray(np.transpose(
                            np.copy(imageO), (1, 0, 2)),
                                                     dtype=np.uint8)
                        #image=np.ascontiguousarray(rotateImage(imageO),dtype=np.uint8)
                    #image=imutils.rotate_bound(image, 90)
                    #image=imutils.resize(image, width=370)

                    #imageWithLandmarks,landmarks,_=searchForFaceInTheWholeImage(np.copy(image))
                    if (trackingLost):
                        landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                            np.copy(image),
                            faceROI=[
                                0, 0, image.shape[0] - 1, image.shape[1] - 1
                            ],
                            drawBoundingBoxes=True)
                    else:
                        landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                            np.copy(image), faceROI[0], drawBoundingBoxes=True)
                    #print(trackingLost,faceROI)
                    cv2.imshow('image', image)
                    cv2.waitKey(1)
                    if (trackingLost):
                        continue
                    #pdb.set_trace()
                    #landmarks[0][:,1]=landmarks[0][:,1]+np.sign(image.shape[]landmarks[0][:,1])
                    landmarks[0][:, [0, 1]] = landmarks[0][:, [1, 0]]
                    #landmarks[0][range(0,68)]=landmarks[0][indiceToSymmetry,:]
                    #pdb.set_trace()
                    #######registeredFace=performFaceAlignment(image,landmarks[0],cols=600,rows=600)
                    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                        model3D, landmarks[0])
                    # load mask to exclude eyes from symmetry
                    # perform frontalization
                    frontal_raw, registeredFace = frontalize.frontalize(
                        imageO, proj_matrix, model3D.ref_U, eyemask)

                    if (registeredFace is not None):

                        faceVector = ((registeredFace) / 255.0).reshape(-1)
                        if (faceMatrix is None):
                            faceMatrix = faceVector
                        else:
                            faceMatrix = np.vstack((faceMatrix, faceVector))
                        labels.append(folder)
                        for landmark in landmarks:
                            cv2.polylines(
                                image, np.int32(landmark.reshape((-1, 1, 2))),
                                True, (0, 0, 255), 3)
                        cv2.imshow('image333', frontal_raw)
                        cv2.imshow('aligned face', registeredFace)
                        cv2.waitKey(1)
                        cv2.imwrite(
                            os.path.join(directory, folder,
                                         str(count) + '.jpg'), registeredFace)

                count = count + 1
            cap.release()
    return faceMatrix, labels
    print("reading images step finished")

    names = (list(set(labels)))
    labelNumPersonDict = {name: i for i, name in enumerate(names)}
    labelNumPerson = [labelNumPersonDict.get(n, n) for n in labels]

    recognizer = cv2.face.LBPHFaceRecognizer_create(1, 8, 8, 8)
    recognizer.train(imgArray, np.array(labelNumPerson))

    predictOnTestImages(".//Frontalization_data_train", recognizer,
                        labelNumPersonDict)
    ##predictOnTestImages(".//frontal_appstud_test",recognizer, labelNumPersonDict)

    cap = cv2.VideoCapture(0)
    model3D = frontalize.ThreeD_Model(
        "./frontalization_models/model3Ddlib.mat", 'model_dlib')

    eyeMask = np.asarray(
        sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    model3D.out_A = np.asmatrix(np.array([[572.216, 0, 316.76],
                                          [0, 577.67, 223.65], [0, 0, 1]]),
                                dtype='float32')  #3x3
    model3D.distCoeff = np.array(
        [0.16975413, -0.39957115, -0.01705758, 0.00303078, -0.25477128])
    #print(labelNumPersonDict)
    while (True):
        ret, frame = cap.read()

        if (ret):
            person = performFaceRecognitionWithFrontalisation(
                frame, recognizer, labelNumPersonDict, model3D, eyeMask)
Example #14
def readVideosAndLabels(directory, alignment="2D"):
    namesUnique = os.listdir(directory)
    labels = []
    faceMatrix = None
    outPath = directory + "_aligned_" + alignment
    createDirectory(outPath)

    if (alignment != "2D"):
        model3D = frontalize.ThreeD_Model(
            "./frontalization_models/model3Ddlib.mat", 'model_dlib')
        eyemask = np.asarray(
            sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
        eyemask = imutils.resize(eyemask[52:250, 91:225], height=60)
        model3D.ref_U = imutils.resize(model3D.ref_U[52:250, 91:225, :],
                                       height=60)

        model3D.out_A = np.asmatrix(np.array(
            [[0.5 * 506.696672, 0, 0.5 * 324.202],
             [0, 0.5 * 506.3752, 0.5 * 245.7785096], [0, 0, 1]]),
                                    dtype='float32')  #3x3

        model3D.distCoeff = None

    count = 0
    for ind, folder in enumerate(namesUnique):
        createDirectory(os.path.join(outPath, folder))

        for path in os.listdir(os.path.join(directory, folder)):

            path = os.path.join(directory, folder, path)
            cap = cv2.VideoCapture(path)
            trackingLost = True

            while (True):

                ret, imageO = cap.read()

                if (not ret):
                    break
                landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                    np.copy(imageO),
                    faceROI=[0, 0, imageO.shape[0] - 1, imageO.shape[1] - 1],
                    drawBoundingBoxes=True)
                if (trackingLost):
                    continue
                for k, landmark in enumerate(landmarks):

                    if (alignment == "2D"):
                        registeredFace = performFaceAlignment(imageO,
                                                              landmark,
                                                              cols=600,
                                                              rows=600)
                    else:
                        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                            model3D, landmark)
                        frontal_raw, registeredFace = frontalize.frontalize(
                            imageO, proj_matrix, model3D.ref_U, eyemask)
                        xdir = 2 * np.array([-49.6694, -0.3201, 1.0163])
                        ydir = 4 * np.array([-0.9852, -3.1128, 15.0628])
                        zdir = -np.array([-1.658, 747.159, 154.29]) / 5.0
                        origin = np.array([-0.0845, -74.7281, 27.2774])
                        image, _ = model3D.drawCoordinateSystems(
                            np.hstack((rmat, tvec)),
                            image,
                            _3Dpoints=np.array([
                                origin, origin + xdir, origin + ydir,
                                origin + zdir
                            ]))
                        #image=model3D.drawCandideMesh(np.hstack((rmat,tvec)),image)

                    if (registeredFace is not None):

                        cv2.polylines(image,
                                      np.int32(landmark.reshape((-1, 1, 2))),
                                      True, (0, 0, 255), 3)

                        box = goodAlignement_cascade.detectMultiScale(
                            registeredFace, 1.1, 1, minSize=(32, 32))

                        try:
                            """count=count+1
                            cv2.imwrite(os.path.join(outPath,folder,str(count)+'.jpg'), registeredFace)  
                            """
                            if (len(box) > 0):

                                cv2.imwrite(
                                    os.path.join(outPath, folder,
                                                 str(count) + '.jpg'),
                                    imutils.resize(registeredFace, height=48))

                                cv2.rectangle(registeredFace,
                                              (box[0][0], box[0][1]),
                                              (box[0][0] + box[0][2],
                                               box[0][1] + box[0][3]),
                                              (255, 0, 255), 2)
                                count = count + 1

                        except Exception as e:
                            print(e)

            cap.release()
    return faceMatrix, labels
Example #16
            if ret:
                grey_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.imshow('frame', grey_frame)  # single-channel frame, shown as-is
                cv2.waitKey(1)
            else:
                break
    except KeyboardInterrupt:
        cap.release()
        cv2.destroyAllWindows()
        return

parser = argparse.ArgumentParser(description='Dialogue folder')
parser.add_argument('--folder', '-f', type=str, required=True,
                    help='folder where the data is stored')
parser.add_argument('-p', '--shape-predictor', required=True,
                    help='path to facial landmark predictor')
parser.add_argument('-fr', '--frontalization-models', required=True,
                    help='path to frontalization model')
args = parser.parse_args()

#faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
detector = dlib.get_frontal_face_detector()
shapePredictor = dlib.shape_predictor(args.shape_predictor)
frontalize_model_name = args.frontalization_models.split(os.sep)[-1].split('.')[0]
frontalizePred = frontalize.ThreeD_Model(args.frontalization_models, 'model_dlib')

for root, dirs, files in os.walk(args.folder):
    for file in files:
        if file.endswith('cv-video'):
            open_video_raw_data(root, file)
        if file.endswith('mp4'):
            open_video_data(os.path.join(root, file))