def get_frontal_image(image):

    predictor_path = 'dlib_models/shape_predictor_68_face_landmarks.dat'
    model3D_path = 'frontalization_models/model3Ddlib.mat'
    eyemask_path = 'frontalization_models/eyemask.mat'

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    img = cv2.imread(image, 1)
    height, width, layers = img.shape
    new_h = round(height / 3)
    new_w = round(width / 3)
    img = cv2.resize(img, (new_w, new_h))

    # model3D and eyemask are already loaded above; reuse them directly
    lmarks = feature_detection.get_landmarks(img, detector, predictor)
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    cv2.imwrite('frontal_image.png', frontal_sym)
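
# A minimal usage sketch (not part of the original example): 'face.jpg' is a
# hypothetical input path; the function assumes at least one detectable face
# and writes 'frontal_image.png' to the working directory.
if __name__ == '__main__':
    get_frontal_image('face.jpg')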
Example #2
def demo():
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    img = cv2.imread("test.jpg", 1)
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix, model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
Example #3
def demo():
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    img = cv2.imread("test.jpg", 1)
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
Example #4
def flipInCase(img, lmarks, allModels):
    ## Check if we need to flip the image
    # yaw measures the head's rotation around the vertical axis
    yaws = []  #np.zeros(1,len(allModels))
    ## Getting yaw estimate over poses and subjects
    # iterate over every FaceModel object
    for mmm in allModels.values():
        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            mmm, lmarks[0])
        yaws.append(calib.get_yaw(rmat))
    yaws = np.asarray(yaws)
    # take the mean over all yaw estimates
    yaw = yaws.mean()
    print('> Yaw value mean: ', yaw)
    if yaw < 0:
        print('> Negative yaw detected, flipping the image')
        # flip the image horizontally
        img = cv2.flip(img, 1)
        # Flipping X values for landmarks
        lmarks[0][:, 0] = img.shape[1] - lmarks[0][:, 0]
        # Creating flipped landmarks with new indexing
        lmarks3 = np.zeros((1, 68, 2))
        for i in range(len(repLand)):
            lmarks3[0][i, :] = lmarks[0][repLand[i] - 1, :]
        lmarks = lmarks3
    return img, lmarks, yaw
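
# Hedged usage sketch (not from the original source): `allModels` is assumed
# to be a dict of preloaded FaceModel objects (e.g. from myutil.preload), and
# `repLand` a module-level 1-based table mapping each of the 68 dlib landmarks
# to its horizontal mirror.
img = cv2.imread('face.jpg', 1)  # hypothetical input
lmarks = feature_detection.get_landmarks(img)
img, lmarks, yaw = flipInCase(img, lmarks, allModels)
print('> Estimated yaw after optional flip:', yaw)
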
def img_normalization(img, lmark):
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

    height, width, _ = img.shape

    #face_lu = np.array([min(lmark[:,0]), min(lmark[:,1])])
    #face_rd = np.array([max(lmark[:,0]), max(lmark[:,1])])
    #face_lu = face_lu - 20
    #face_rd = face_rd + 20

    #if face_lu[0]<0:
    #    face_lu[0] = 0
    #if face_lu[1]<0:
    #    face_lu[1] = 0
    #if face_rd[0] > width:
    #    face_rd[0] = width
    #if face_rd[1] > height:
    #    face_rd[1] = height
    #face_lu = face_lu.astype('uint64')
    #face_rd = face_rd.astype('uint64')

    #new_img = img[face_lu[1]:face_rd[1], face_lu[0]:face_rd[0]]
    #new_img = cv2.resize(img, (320, 320))

    #lmark = lmark - [face_lu[0], face_lu[1]]
    #w_res = 320/(face_rd[0]-face_lu[0])
    #h_res = 320/(face_rd[1]-face_lu[1])
    #lmark = lmark*[w_res, h_res]
    #new_lm = copy.copy(lmark)

    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmark)
    frontal_raw, frontal_sym, proj_map = frontalize.frontalize(
        img, proj_matrix, model3D.ref_U, eyemask)

    rot_lmark = np.zeros((lmark.shape[0], 2))
    for i in range(lmark.shape[0]):
        rot_lmark[i][1], rot_lmark[i][0] = find_nearest(proj_map, lmark[i])

    reye_ul, reye_dr = lmark_bbox(rot_lmark[36:42])
    leye_ul, leye_dr = lmark_bbox(rot_lmark[42:48])
    mouth_ul, mouth_dr = lmark_bbox(rot_lmark[48:68], pos='mouth')

    int_frontal = frontal_sym.astype('uint8')
    reye = int_frontal[reye_ul[0]:reye_dr[0], reye_ul[1]:reye_dr[1]]
    leye = int_frontal[leye_ul[0]:leye_dr[0], leye_ul[1]:leye_dr[1]]
    mouth = int_frontal[mouth_ul[0]:mouth_dr[0], mouth_ul[1]:mouth_dr[1]]

    frontal_lmark = np.copy(int_frontal)
    for i in range(len(rot_lmark)):
        cv2.circle(frontal_lmark, (int(rot_lmark[i][0]), int(rot_lmark[i][1])),
                   2, (0, 0, 255),
                   thickness=-1)

    return int_frontal, reye, leye, mouth, frontal_lmark
Example #6
    def frontalize(self, img):
        lmarks = feature_detection.get_landmarks1(img)

        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            self.model3D, lmarks[0])
        frontal_raw, frontal_sym = frontalize.frontalize(
            img, proj_matrix, self.model3D.ref_U, self.eyemask)

        return frontal_raw
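
# The method above assumes its class preloads the 3D model and eyemask once.
# A hedged sketch of such an __init__ (hypothetical class name; the original
# class body is not shown here):
class FrontalizerWrapper(object):
    def __init__(self):
        self.model3D = frontalize.ThreeD_Model(
            'frontalization_models/model3Ddlib.mat', 'model_dlib')
        self.eyemask = np.asarray(
            io.loadmat('frontalization_models/eyemask.mat')['eyemask'])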
Example #7
def decideSide_from_db(img, pose_Rt, allModels):
    ## Check if we need to flip the image
    #model3D = ThreeD_Model.FaceModel(this_path + "/models3d/" + pose_models[0] +'_01.mat', 'model3D')
    ## Getting yaw estimate over poses and subjects
    mm = list(allModels.values())[0]
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        mm, pose_Rt, pose_db_on=True)
    yaw = calib.get_yaw(rmat)
    print('> Yaw value mean: ', yaw)
    return yaw
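
# Hedged usage sketch (not from the original source): `pose_Rt` is assumed to
# be a flat float32 pose vector read from the FPN pose database (see
# render_fpn below), and `allModels` a dict of preloaded pose models.
yaw = decideSide_from_db(img, pose_Rt, allModels)
if yaw < 0:
    img = cv2.flip(img, 1)  # mirror the image for negative yaw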
Example #8
 def frontalized(self, image):
     landmarks = facial.get_landmarks(image, self.args.resource_dir,
                                      self.args)
     if len(landmarks) > 0:
         proj_matrix, camera_matrix, rmat, tvec = camera.estimate_camera(
             self.model3D, landmarks[0])
         _, front_image = front.frontalize(image, proj_matrix,
                                           self.model3D.ref_U, self.eyemask)
         detections = self.detector(front_image, 1)
         for detection in detections:
             return front_image[detection.top():detection.bottom(),
                                detection.left():detection.right()]
         return
def front_database_build(name, data_path, work_path, eyemask):
    '''
    Given the list of subject names in -name-, the path from which to read the images,
    the destination folder in which to save the database, and the eyemask parameter
    used for face frontalization, this routine reads the images from data_path and
    saves the frontalized faces under work_path, in one folder per individual.
    '''
    for x in name[:]:
        pathdir = os.path.join(work_path, x)

        #print pathdir #for debug purposes
        make_sure_path_exists(pathdir)
        search_filename = x + "*.pgm"
        search_location = os.path.join(data_path, search_filename)
        #print search_location #for debug purposes

        for filepath in glob.glob(search_location):

            # Extract filename from the filepath found at search location
            tail = os.path.basename(filepath)  #os.path.split(search_location)
            #print tail #for debug purposes
            out_name = tail + 'fr.pgm'

            # For MIT-CBCL dataset file naming, break name into the image data provided in filename
            # 'subject' - 'face rotation' - etc
            file_data = re.split(r"[_]", tail)
            #print file_data #for debug purposes

            # angular limit for rotated face frontalization
            face_rot = -26

            # Process only the images whose rotation angle is greater than face_rot
            if os.path.isfile(filepath) and face_rot < float(file_data[1]):

                img = cv2.imread(filepath, 1)
                # extract landmarks from the query image
                # list containing a 2D array with points (x, y) for each face detected in the query image
                lmarks = feature_detection.get_landmarks(img)
                # perform camera calibration according to the first face detected
                proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                    model3D, lmarks[0])

                # perform frontalization
                frontal_raw, frontal_sym = frontalize.frontalize(
                    img, proj_matrix, model3D.ref_U, eyemask)
                #shutil.copy(filepath, pathdir)
                #write frontal_sym to image file in pathdir location
                cv2.imwrite(os.path.join(pathdir, out_name), frontal_sym)
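
# Hedged usage sketch (not from the original source): the subject list and
# paths are hypothetical; `model3D` is assumed to be loaded at module level,
# since the function body above references it without taking it as a parameter.
eyemask = np.asarray(
    io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
front_database_build(['subject01', 'subject02'], 'data/mit_cbcl',
                     'work/frontalized', eyemask)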
Example #10
def demo():
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    # img2 = cv2.imread("test.jpg", 1)
    img = cv2.imread(
        "/home/jordan/PycharmProjects/Emotion_Analysis/Facial_Expression_recog/data/test_photo.jpg",
        1)
    img = cv2.resize(img, (img.shape[1] // 3, img.shape[0] // 3))  # dsize is (width, height)
    # detected_object = mouthdetection.findmouth(img, haar_face, haar_mouth)[0]
    # img = smile_detection.crop(detected_object, img, extended = True)
    cv2.imshow("", img)
    cv2.waitKey(5000)
    img = img.astype(np.uint8)
    # print img.shape
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
Example #11
def demo(path):

    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    img = cv2.imread(path, 1)

    lmarks = feature_detection.get_landmarks(img)

    if (len(lmarks) > 0):
        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            model3D, lmarks[0])

        eyemask = np.asarray(
            io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

        frontal_raw, frontal_sym = frontalize.frontalize(
            img, proj_matrix, model3D.ref_U, eyemask)
        cv2.imwrite(path, frontal_raw)  # note: overwrites the input image in place
Example #12
    def frontalize(self, img):
        lmarks = feature_detection.get_landmarks1(img)
        '''
        plt.figure()
        plt.title('Landmarks Detected')
        plt.imshow(img[:, :, ::-1])
        plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
        '''

        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(self.model3D, lmarks[0])
        frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix, self.model3D.ref_U, self.eyemask)
        '''
        plt.figure()
        plt.title('Frontalized no symmetry')
        plt.imshow(frontal_raw[:, :, ::-1])
        plt.figure()
        plt.title('Frontalized with soft symmetry')
        plt.imshow(frontal_sym[:, :, ::-1])
        plt.show()
        '''
        return frontal_raw
Example #13
def demo(image):
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    img = cv2.resize(image, (250, 250), interpolation=cv2.INTER_LINEAR)

    lmarks, f = feature_detection.get_landmarks(img)

    if not f:
        return False, 1

    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])

    print(proj_matrix)

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization

    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    '''
    plt.figure()
    plt.title('Image frontalized(Before symmetry)')
    plt.imshow(frontal_raw[:,:,::-1].astype('uint8'))
    plt.figure()
    plt.title('Image frontalized(After symmetry)')
    plt.imshow(frontal_sym[:,:,::-1].astype('uint8'))
    '''
    x, y, z = grammer.get_angle(rmat)
    print('Rotation angles: x: {}, y: {}, z: {}'.format(x, y, z))  # rough estimate of the rotation angles
    image_output = frontal_sym.astype('uint8')
    return True, image_output
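
# Hedged usage sketch (not from the original source): the image path is
# hypothetical; this variant returns a success flag plus either the
# frontalized image or an error code.
ok, out = demo(cv2.imread('face.jpg', 1))
if ok:
    cv2.imwrite('frontalized.png', out)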
Example #14
def demo():
    nSub = opts.getint('general', 'nTotSub')
    fileList, outputFolder = myutil.parse(sys.argv)
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    ## Preloading all the models for speed
    allModels = myutil.preload(this_path, pose_models_folder, pose_models,
                               nSub)

    for f in fileList:
        if '#' in f:  #skipping comments
            continue
        splitted = f.split(',')
        image_key = splitted[0]
        image_path = splitted[1]
        image_landmarks = splitted[2]
        img = cv2.imread(image_path, 1)
        if image_landmarks != "None":
            lmark = np.loadtxt(image_landmarks)
            lmarks = []
            lmarks.append(lmark)
        else:
            print('> Detecting landmarks')
            lmarks = feature_detection.get_landmarks(img, this_path)

        if len(lmarks) != 0:
            ## Keep a copy of the original image and flip the image if needed.
            ## The flip decision is made over all models/poses to refine the
            ## yaw estimate, since yaw can change from model to model.
            img_display = img.copy()
            img, lmarks, yaw = myutil.flipInCase(img, lmarks, allModels)
            listPose = myutil.decidePose(yaw, opts, newModels)
            ## Looping over the poses
            for poseId in listPose:
                posee = pose_models[poseId]
                ## Looping over the subjects
                for subj in range(1, nSub + 1):
                    pose = posee + '_' + str(subj).zfill(2) + '.mat'
                    print('> Looking at file: ' + image_path + ' with ' + pose)
                    # load detections performed by dlib library on 3D model and Reference Image
                    print("> Using pose model in " + pose)
                    ## Indexing the right model instead of loading it each time from memory.
                    model3D = allModels[pose]
                    eyemask = model3D.eyemask
                    # perform camera calibration according to the first face detected
                    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                        model3D, lmarks[0])
                    ## We use eyemask only for frontal
                    if not myutil.isFrontal(pose):
                        eyemask = None
                    ##### Main part of the code: doing the rendering #############
                    rendered_raw, rendered_sym, face_proj, background_proj, temp_proj2_out_2, sym_weight = renderer.render(img, proj_matrix,\
                                                                                             model3D.ref_U, eyemask, model3D.facemask, opts)
                    ########################################################

                    if myutil.isFrontal(pose):
                        rendered_raw = rendered_sym
                    ## Cropping if required by crop_models
                    rendered_raw = myutil.cropFunc(pose, rendered_raw,
                                                   crop_models[poseId])
                    ## Resizing if required
                    if resizeCNN:
                        rendered_raw = cv2.resize(
                            rendered_raw, (cnnSize, cnnSize),
                            interpolation=cv2.INTER_CUBIC)
                    ## Saving if required
                    if opts.getboolean('general', 'saveON'):
                        subjFolder = outputFolder + '/' + image_key.split(
                            '_')[0]
                        myutil.mymkdir(subjFolder)
                        savingString = subjFolder + '/' + image_key + '_rendered_' + pose[
                            8:-7] + '_' + str(subj).zfill(2) + '.jpg'
                        cv2.imwrite(savingString, rendered_raw)

                    ## Plotting if required
                    if opts.getboolean('general', 'plotON'):
                        myutil.show(img_display, img, lmarks, rendered_raw, \
                        face_proj, background_proj, temp_proj2_out_2, sym_weight)
        else:
            print('> Landmark not detected for this image...')
def readVideosAndLabels(directory, alignment="2D"):
    namesUnique = os.listdir(directory)
    labels = []
    faceMatrix = None
    outPath = directory + "_aligned_" + alignment
    createDirectory(outPath)

    if (alignment != "2D"):
        model3D = frontalize.ThreeD_Model(
            "./frontalization_models/model3Ddlib.mat", 'model_dlib')
        eyemask = np.asarray(
            sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
        eyemask = imutils.resize(eyemask[52:250, 91:225], height=60)
        model3D.ref_U = imutils.resize(model3D.ref_U[52:250, 91:225, :],
                                       height=60)

        model3D.out_A = np.asmatrix(
            np.array([[0.5 * 506.696672, 0, 0.5 * 324.202],
                      [0, 0.5 * 506.3752, 0.5 * 245.7785096],
                      [0, 0, 1]]),
            dtype='float32')  # 3x3 camera intrinsics

        model3D.distCoeff = None

    count = 0
    for ind, folder in enumerate(namesUnique):
        createDirectory(os.path.join(outPath, folder))

        for path in os.listdir(os.path.join(directory, folder)):

            path = os.path.join(directory, folder, path)
            cap = cv2.VideoCapture(path)
            trackingLost = True

            while (True):

                ret, imageO = cap.read()

                if (not ret):
                    break
                landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                    np.copy(imageO),
                    faceROI=[0, 0, imageO.shape[0] - 1, imageO.shape[1] - 1],
                    drawBoundingBoxes=True)
                if (trackingLost):
                    continue
                for k, landmark in enumerate(landmarks):

                    if (alignment == "2D"):
                        registeredFace = performFaceAlignment(imageO,
                                                              landmark,
                                                              cols=600,
                                                              rows=600)
                    else:
                        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                            model3D, landmark)
                        frontal_raw, registeredFace = frontalize.frontalize(
                            imageO, proj_matrix, model3D.ref_U, eyemask)
                        xdir = 2 * np.array([-49.6694, -0.3201, 1.0163])
                        ydir = 4 * np.array([-0.9852, -3.1128, 15.0628])
                        zdir = -np.array([-1.658, 747.159, 154.29]) / 5.0
                        origin = np.array([-0.0845, -74.7281, 27.2774])
                        image, _ = model3D.drawCoordinateSystems(
                            np.hstack((rmat, tvec)),
                            image,
                            _3Dpoints=np.array([
                                origin, origin + xdir, origin + ydir,
                                origin + zdir
                            ]))
                        #image=model3D.drawCandideMesh(np.hstack((rmat,tvec)),image)

                    if (registeredFace is not None):

                        cv2.polylines(image,
                                      np.int32(landmark.reshape((-1, 1, 2))),
                                      True, (0, 0, 255), 3)

                        box = goodAlignement_cascade.detectMultiScale(
                            registeredFace, 1.1, 1, minSize=(32, 32))

                        try:
                            """count=count+1
                            cv2.imwrite(os.path.join(outPath,folder,str(count)+'.jpg'), registeredFace)  
                            """
                            if (len(box) > 0):

                                cv2.imwrite(
                                    os.path.join(outPath, folder,
                                                 str(count) + '.jpg'),
                                    imutils.resize(registeredFace, height=48))

                                cv2.rectangle(registeredFace,
                                              (box[0][0], box[0][1]),
                                              (box[0][0] + box[0][2],
                                               box[0][1] + box[0][3]),
                                              (255, 0, 255), 2)
                                count = count + 1

                        except Exception as e:
                            print(e)

            cap.release()
    return faceMatrix, labels
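
# Hedged usage sketch (not from the original source): the directory is
# assumed to hold one subfolder of videos per person; any alignment value
# other than "2D" selects the frontalization branch above.
faceMatrix, labels = readVideosAndLabels('videos_by_person', alignment='3D')
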
def performFaceRecognitionWithFrontalisationV2(image, recognizer, model3D,
                                               eyemask, names):
    start = time.time()
    imageWithLandmarks, landmarks, faceROIs = searchForFaceInTheWholeImage(
        np.copy(image))
    result = []

    registeredFaceColor = 255  # fallback return values used when no face
    raw = 255                  # passes the alignment check below
    for landmark, faceROI in zip(landmarks, faceROIs):

        goodAlignment = False
        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            model3D, landmark)
        raw, registeredFaceColor = frontalize.frontalize(
            image, proj_matrix, model3D.ref_U, eyemask)
        registeredFaceGray = cv2.cvtColor(registeredFaceColor,
                                          cv2.COLOR_BGR2GRAY)
        box = goodAlignement_cascade.detectMultiScale(registeredFaceGray,
                                                      1.1,
                                                      1,
                                                      minSize=(32, 32))

        ###############
        registeredFace = imutils.resize(registeredFaceGray, height=48)

        try:
            cv2.rectangle(registeredFace, (box[0][1], box[0][0]),
                          (box[0][1] + box[0][3], box[0][0] + box[0][2]),
                          (255, 0, 255), 2)
            goodAlignment = True
        except:

            pass
        if (goodAlignment):
            pred, conf = recognizer.predict(registeredFace)

            xdir = 2 * np.array([-49.6694, -0.3201, 1.0163])
            ydir = 4 * np.array([-0.9852, -3.1128, 15.0628])
            zdir = -np.array([-1.658, 747.159, 154.29]) / 5.0
            origin = np.array([-0.0845, -74.7281, 27.2774])
            try:
                image, _ = model3D.drawCoordinateSystems(np.hstack(
                    (rmat, tvec)),
                                                         imageWithLandmarks,
                                                         _3Dpoints=np.array([
                                                             origin,
                                                             origin + xdir,
                                                             origin + ydir,
                                                             origin + zdir
                                                         ]))
            except:
                pass

            identity = list(names.keys())[list(names.values()).index(pred)]
            cv2.rectangle(image, (faceROI[1], faceROI[0]),
                          (faceROI[1] + faceROI[3], faceROI[0] + faceROI[2]),
                          (255, 0, 255), 2)
            cv2.putText(image,
                        identity, (faceROI[1], faceROI[0]),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1, (0, 0, 255),
                        thickness=1)

    end = time.time()

    cv2.putText(image,
                "FPS: " + "{0:.2f}".format(round(1.0 / (end - start), 2)),
                (15, 15),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 255),
                thickness=1)

    return image, registeredFaceColor
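
# Hedged usage sketch (not from the original source): assumes an LBPH face
# recognizer from opencv-contrib trained elsewhere; the label map and file
# paths are hypothetical.
model3D = frontalize.ThreeD_Model(
    './frontalization_models/model3Ddlib.mat', 'model_dlib')
eyemask = np.asarray(
    sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('recognizer.yml')  # hypothetical trained model file
names = {'alice': 0, 'bob': 1}  # hypothetical name -> label map
frame = cv2.imread('frame.jpg', 1)
annotated, face = performFaceRecognitionWithFrontalisationV2(
    frame, recognizer, model3D, eyemask, names)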
Example #17
def render_fpn(inputFile, output_pose_db, outputFolder):
    ## Opening FPN pose db
    pose_env = lmdb.open(output_pose_db, readonly=True)
    pose_cnn_lmdb = pose_env.begin()
    ## looping over images
    with open(inputFile, 'r') as csvfile:
        lines = csvfile.readlines()
        for lin in lines:
            ### key1, image_path_key_1
            image_key = lin.split(',')[0]
            if 'flip' in image_key:
                continue

            image_path = lin.split(',')[-1].rstrip('\n')
            img = cv2.imread(image_path, 1)
            pose_Rt_raw = pose_cnn_lmdb.get(image_key)
            pose_Rt_flip_raw = pose_cnn_lmdb.get(image_key + '_flip')

            if pose_Rt_raw is not None:
                pose_Rt = np.frombuffer(pose_Rt_raw, np.float32)
                pose_Rt_flip = np.frombuffer(pose_Rt_flip_raw, np.float32)

                yaw = myutil.decideSide_from_db(img, pose_Rt, allModels)

                if yaw < 0:  # flip the image and get the corresponding pose
                    img = cv2.flip(img, 1)
                    pose_Rt = pose_Rt_flip

                listPose = myutil.decidePose(yaw, opts, newModels)
                ## Looping over the poses
                for poseId in listPose:
                    posee = pose_models[poseId]
                    ## Looping over the subjects
                    for subj in [10]:
                        pose = posee + '_' + str(subj).zfill(2) + '.mat'
                        print('> Looking at file: ' + image_path + ' with ' + pose)
                        # load detections performed by dlib library on 3D model and Reference Image
                        print("> Using pose model in " + pose)
                        ## Indexing the right model instead of loading it each time from memory.
                        model3D = allModels[pose]
                        eyemask = model3D.eyemask
                        # perform camera calibration according to the first face detected
                        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                            model3D, pose_Rt, pose_db_on=True)
                        ## We use eyemask only for frontal
                        if not myutil.isFrontal(pose):
                            eyemask = None
                        ##### Main part of the code: doing the rendering #############
                        rendered_raw, rendered_sym, face_proj, background_proj, temp_proj2_out_2, sym_weight = renderer_core.render(img, proj_matrix,\
                                                                                                 model3D.ref_U, eyemask, model3D.facemask, opts)
                        ########################################################

                        if myutil.isFrontal(pose):
                            rendered_raw = rendered_sym
                        ## Cropping if required by crop_models
                        #rendered_raw = myutil.cropFunc(pose,rendered_raw,crop_models[poseId])
                        ## Resizing if required
                        #if resizeCNN:
                        #    rendered_raw = cv2.resize(rendered_raw, ( cnnSize, cnnSize ), interpolation=cv2.INTER_CUBIC )
                        ## Saving if required
                        if opts.getboolean('general', 'saveON'):
                            subjFolder = outputFolder + '/' + image_key.split(
                                '_')[0]
                            myutil.mymkdir(subjFolder)
                            savingString = subjFolder + '/' + image_key + '_rendered_' + pose[
                                8:-7] + '_' + str(subj).zfill(2) + '.jpg'
                            cv2.imwrite(savingString, rendered_raw)
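
# Hedged usage sketch (not from the original source): file names are
# hypothetical; the LMDB pose database is assumed to map image keys (and
# their '_flip' variants) to flat float32 pose vectors.
render_fpn('input_list.csv', 'pose_lmdb', 'rendered_output')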
def performFaceRecognitionWithFrontalisation(image, recognizer, names, model3D,
                                             eyemask):
    #indiceToSymmetry=[17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,  27,26,25,24,23, 22,21,20,19,18, 28,29,30,31, 36,35,35,33,32, 46,45,44,43,48,47,  40,39,38,37,42,41,  55,54,53,52,51,50,49,60,59,58,57,56, 65,64,63,62,61,68,67,66]
    #indiceToSymmetry=list(np.array(indiceToSymmetry)-1)

    imageWithLandmarks, landmarks, faceROIs = searchForFaceInTheWholeImage(
        np.copy(image))
    result = []
    for landmark, faceROI in zip(landmarks, faceROIs):
        #registeredFace=performFaceAlignment(image,landmark,cols=600,rows=600)
        #landmark[range(0,68),:]=landmark[indiceToSymmetry,:]

        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            model3D, landmark)
        # load mask to exclude eyes from symmetry
        # perform frontalization
        frontal_raw, registeredFace = frontalize.frontalize(
            image, proj_matrix, model3D.ref_U, eyemask)
        registeredFace = imutils.resize(registeredFace, height=100)

        cv2.imshow('el3ab', cv2.cvtColor(registeredFace, cv2.COLOR_BGR2GRAY))

        cv2.waitKey(1)

        pred, conf = recognizer.predict(
            cv2.cvtColor(registeredFace, cv2.COLOR_BGR2GRAY))

        xdir = 2 * np.array([-49.6694, -0.3201, 1.0163])
        ydir = 4 * np.array([-0.9852, -3.1128, 15.0628])
        zdir = -np.array([-1.658, 747.159, 154.29]) / 5.0
        origin = np.array([-0.0845, -74.7281, 27.2774])
        image, _ = model3D.drawCoordinateSystems(np.hstack((rmat, tvec)),
                                                 image,
                                                 _3Dpoints=np.array([
                                                     origin, origin + xdir,
                                                     origin + ydir,
                                                     origin + zdir
                                                 ]))
        image = model3D.drawCandideMesh(np.hstack((rmat, tvec)), image)
        #
        #distance, indice = svm.kneighbors(faceVector.reshape(1,-1))
        #indice=classifier.predict(embeddedFace.reshape(1,-1))
        #pred=svm_Tuned.predict(faceVector.reshape(1,-1))[0]

        ###distance, indice = nbrs.kneighbors(embeddedFace.reshape(1,-1))
        #indice=classifier.predict(embeddedFace.reshape(1,-1))
        identity = list(names.keys())[list(names.values()).index(pred)]
        print(pred, conf)
        #cv2.rectangle(image,(faceROI[1],faceROI[0]),(faceROI[1]+faceROI[3],faceROI[0]+faceROI[2]),(255,0,255),2)
        #cv2.putText(image,indice[0],(faceROI[1],faceROI[0]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))
        cv2.rectangle(image, (faceROI[1], faceROI[0]),
                      (faceROI[1] + faceROI[3], faceROI[0] + faceROI[2]),
                      (255, 0, 255), 2)

        cv2.putText(image,
                    identity, (faceROI[1], faceROI[0]),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1, (255, 0, 0),
                    thickness=2)
        ###result.append(indice[0])
        #print (distance,labels[indice[0][0]])
        """if(distance<threshold):
            
            cv2.rectangle(image,(faceROI[1],faceROI[0]),(faceROI[1]+faceROI[3],faceROI[0]+faceROI[2]),(255,0,255),2)      
            cv2.putText(image,labels[indice[0][0]]   ,(faceROI[1],faceROI[0]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))
            
            result.append(labels[indice[0][0]])
        
        else:
            cv2.rectangle(image,(faceROI[1],faceROI[0]),(faceROI[1]+faceROI[3],faceROI[0]+faceROI[2]),(255,0,255),2)      
            cv2.putText(image,"unknown"   ,(faceROI[1],faceROI[0]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))
            
            result.append("unknown")
        """

    cv2.imshow('image', image)
    cv2.waitKey(1)
    return result, faceROIs
Example #19
def get_video_points(
        video_file,
        predictor_path='dlib_models/shape_predictor_68_face_landmarks.dat',
        model3D_path='frontalization_models/model3Ddlib.mat',
        eyemask_path='frontalization_models/eyemask.mat',
        mode='face',
        resize=False):

    time_list = []
    points_list = []
    points_list_sym = []
    points_list_raw = []

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    cap = cv2.VideoCapture(video_file)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print('fps=', fps)

    # width = 320
    # height = 320

    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # video_sym = cv2.VideoWriter('video_sym.avi', fourcc, fps, (width, height))
    # video_raw = cv2.VideoWriter('video_raw.avi', fourcc, fps, (width, height))

    while True:
        try:

            ret, frame = cap.read()
            if not ret:
                print("no ret")
                break

            msec = cap.get(cv2.CAP_PROP_POS_MSEC)
            print(msec / 1000)

            img = frame
            iheight, iwidth, layers = img.shape

            #resize
            if resize:
                new_h = round(iheight / 3)
                new_w = round(iwidth / 3)
                img = cv2.resize(img, (new_w, new_h))

            lmarks = feature_detection.get_landmarks(img, detector, predictor)

            if len(lmarks) != 1:
                continue

            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            frontal_raw, frontal_sym = frontalize.frontalize(
                img, proj_matrix, model3D.ref_U, eyemask)

            time_list.append(msec)

            new_img_sym = np.round(frontal_sym).astype(np.uint8)
            new_img_raw = np.round(frontal_raw).astype(np.uint8)
            lmarks_sym = feature_detection.get_landmarks(
                new_img_sym, detector, predictor)
            temp_pts = [[x, y] for x, y in lmarks_sym[0]]

            if mode == 'mouth':
                temp_pts = temp_pts[33:34] + temp_pts[48:]

            points_list_sym.append(temp_pts)

            # video_sym.write(new_img_sym)
            # video_raw.write(new_img_raw)

        except ValueError as e:
            print(e)
            break

    cap.release()
    points_list = list(zip(time_list, points_list_sym))

    with shelve.open(
            'extracted_data/' + video_file.split('/')[-1].split('.')[0] +
            '.txt', 'c') as db:
        for i, x in enumerate(points_list):
            db[str(i)] = x

    return points_list
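
# Hedged usage sketch (not from the original source): the video path is
# hypothetical; the function also persists its output to a shelve database
# under extracted_data/, keyed by frame index as a string.
points = get_video_points('videos/sample.avi', mode='mouth', resize=True)
with shelve.open('extracted_data/sample.txt', 'r') as db:
    msec, pts = db['0']  # timestamp (ms) and landmarks of the first kept frame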
Example #20
def myfrontalize(X, limit=0):
    count = 0

    if (limit == 0):
        limit = X.shape[0]

    print("Total Images: ", limit)

    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()

    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

    for i in range(0, limit):
        print("\r", end='')
        print("Images Completed: {0}".format(i), end='', flush=True)

        # cast img to type int for cv2
        img = X[i, :, :, 0].astype(np.uint8)

        # create a color version for frontalizer stuffs
        c_img = np.copy(img)
        c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)

        # extract landmarks from the query image
        # list containing a 2D array with points (x, y) for each face detected in the query image
        lmarks = feature_detection.get_landmarks(c_img)
        if type(lmarks) is np.ndarray:

            # perform camera calibration according to the first face detected
            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            # perform frontalization and convert result to grayscale
            frontal_raw, frontal_sym = frontalize.frontalize(
                c_img, proj_matrix, model3D.ref_U, eyemask)
            temp = cv2.cvtColor(frontal_raw, cv2.COLOR_BGR2GRAY)

            # find nonzero bbox and crop image to remove unnecessary black space from edges
            temp_mask = cv2.findNonZero(temp)
            t_x, t_y, t_w, t_h = cv2.boundingRect(temp_mask)
            t_bbox = temp[t_y:t_y + t_h, t_x:t_x + t_w]

            # resize the cropped image to the appropriate dimensions for network
            t_bbox = cv2.resize(t_bbox, dsize=(48, 48))
            t_bbox = np.resize(t_bbox, (48, 48, 1))
            X[i] = t_bbox.astype(np.float32)

            plt.show()
            count += 1

    print()
    print('{} images out of {} were frontalized.'.format(count, limit))
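
# Hedged usage sketch (not from the original source): X is assumed to be a
# float32 array of grayscale faces shaped (N, 48, 48, 1), matching the
# in-place write-back above; random data serves only as a placeholder and is
# left untouched when no face is detected.
X = (np.random.rand(10, 48, 48, 1) * 255).astype(np.float32)
myfrontalize(X, limit=5)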
def readVideosAndLabels(directory):
    namesUnique = os.listdir(directory)
    labels = []
    faceMatrix = None
    print(namesUnique)

    indiceToSymmetry = [
        17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 27, 26, 25,
        24, 23, 22, 21, 20, 19, 18, 28, 29, 30, 31, 36, 35, 35, 33, 32, 46, 45,
        44, 43, 48, 47, 40, 39, 38, 37, 42, 41, 55, 54, 53, 52, 51, 50, 49, 60,
        59, 58, 57, 56, 65, 64, 63, 62, 61, 68, 67, 66
    ]
    indiceToSymmetry = list(np.array(indiceToSymmetry) - 1)

    model3D = frontalize.ThreeD_Model(
        "./frontalization_models/model3Ddlib.mat", 'model_dlib')

    eyemask = np.asarray(
        sio.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    for ind, folder in enumerate(namesUnique):
        for path in os.listdir(os.path.join(directory, folder)):

            path = os.path.join(directory, folder, path)
            cap = cv2.VideoCapture(path)
            count = 0
            trackingLost = True
            print(folder)
            """
            if(folder!="Romuald"):
                print("a")
                
                if(folder=="Stephane"):
                    pass
                else:
                   break
            """
            if (folder != "Florian"):
                continue
            while (True):

                ret, imageO = cap.read()

                if (count % 2 == 1):

                    if (not ret):
                        break
                    #print(imageO.shape)
                    #image = cv2.resize(image,(700,700))
                    if (folder == "Faddwzi"):
                        #image=imutils.rotate_bound(image, -90)
                        break
                        #pass
                    else:
                        #image=imutils.rotate_bound(image, 90)
                        image = np.ascontiguousarray(np.transpose(
                            np.copy(imageO), (1, 0, 2)),
                                                     dtype=np.uint8)
                        #image=np.ascontiguousarray(rotateImage(imageO),dtype=np.uint8)
                    #image=imutils.rotate_bound(image, 90)
                    #image=imutils.resize(image, width=370)

                    #imageWithLandmarks,landmarks,_=searchForFaceInTheWholeImage(np.copy(image))
                    if (trackingLost):
                        landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                            np.copy(image),
                            faceROI=[
                                0, 0, image.shape[0] - 1, image.shape[1] - 1
                            ],
                            drawBoundingBoxes=True)
                    else:
                        landmarks, faceROI, trackingLost, image = trackFaceInANeighborhoodAndDetectLandmarks(
                            np.copy(image), faceROI[0], drawBoundingBoxes=True)
                    #print(trackingLost,faceROI)
                    cv2.imshow('image', image)
                    cv2.waitKey(1)
                    if (trackingLost):
                        continue
                    #pdb.set_trace()
                    #landmarks[0][:,1]=landmarks[0][:,1]+np.sign(image.shape[]landmarks[0][:,1])
                    landmarks[0][:, [0, 1]] = landmarks[0][:, [1, 0]]
                    #landmarks[0][range(0,68)]=landmarks[0][indiceToSymmetry,:]
                    #pdb.set_trace()
                    #######registeredFace=performFaceAlignment(image,landmarks[0],cols=600,rows=600)
                    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                        model3D, landmarks[0])
                    # load mask to exclude eyes from symmetry
                    # perform frontalization
                    frontal_raw, registeredFace = frontalize.frontalize(
                        imageO, proj_matrix, model3D.ref_U, eyemask)

                    if (registeredFace is not None):

                        faceVector = ((registeredFace) / 255.0).reshape(-1)
                        if (faceMatrix is None):
                            faceMatrix = faceVector
                        else:
                            faceMatrix = np.vstack((faceMatrix, faceVector))
                        labels.append(folder)
                        for landmark in landmarks:
                            cv2.polylines(
                                image, np.int32(landmark.reshape((-1, 1, 2))),
                                True, (0, 0, 255), 3)
                        cv2.imshow('image333', frontal_raw)
                        cv2.imshow('aligned face', registeredFace)
                        cv2.waitKey(1)
                        cv2.imwrite(
                            os.path.join(directory, folder,
                                         str(count) + '.jpg'), registeredFace)

                count = count + 1
            cap.release()
    return faceMatrix, labels