# Imports assumed by the snippets below, reconstructed from the calls they
# make. The examples come from separate files, so aliases may differ per
# example; notably, most use scipy.io as io (for loadmat), while Example #6
# reads its image with io.imread (skimage.io). Project-local modules such as
# calib, check, feature_detection, frontalize, and the per-example helpers
# (myutil, PE, renderer, ...) belong to the face-frontalization codebase
# these snippets are drawn from.
import math
import os
import time

import cv2
import dlib
import numpy as np
import scipy.io as io

import calib
import check
import feature_detection
import frontalize


def get_frontal_image(image):

    predictor_path = 'dlib_models/shape_predictor_68_face_landmarks.dat'
    model3D_path = 'frontalization_models/model3Ddlib.mat'
    eyemask_path = 'frontalization_models/eyemask.mat'

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    img = cv2.imread(image, 1)
    height, width, layers = img.shape
    new_h = round(height / 3)
    new_w = round(width / 3)
    img = cv2.resize(img, (new_w, new_h))

    lmarks = feature_detection.get_landmarks(img, detector, predictor)
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    cv2.imwrite('frontal_image.png', frontal_sym)
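
# Usage sketch (an illustrative assumption, not part of the original example):
# requires the model files at the hard-coded paths above; the input filename
# is hypothetical.
get_frontal_image('photos/face_01.jpg')        # writes frontal_image.png
frontal = cv2.imread('frontal_image.png', 1)   # reload the frontalized face
print('frontalized image shape:', frontal.shape)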
Example #2
def demo():
    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    img = cv2.imread("test.jpg", 1)
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
def get_angle(image, pt3d, sess):
    lmarks = feature_detection.get_landmarks(image, detector, predictor)
    Pose_Para = []
    if len(lmarks):
        start = time.time()
        if pt3d is None:
            # fall back to the saved reference points when none are passed in
            pt3d = np.load('pt3d.npy')
        Pose_Para = PE.poseEstimation(image, lmarks, pt3d, sess)
        print("pose estimation took:", time.time() - start, "s")
        return np.array(Pose_Para) * 180 / math.pi, lmarks
    else:
        print('cannot find landmarks!')
        return None, None
Example #5
def frontalized(self, image):
    # method from a larger class; a sketch of its assumed context follows below
    landmarks = facial.get_landmarks(image, self.args.resource_dir, self.args)
    if len(landmarks) > 0:
        proj_matrix, camera_matrix, rmat, tvec = camera.estimate_camera(
            self.model3D, landmarks[0])
        _, front_image = front.frontalize(image, proj_matrix,
                                          self.model3D.ref_U, self.eyemask)
        detections = self.detector(front_image, 1)
        for detection in detections:
            return front_image[detection.top():detection.bottom(),
                               detection.left():detection.right()]
        return None
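
# A minimal sketch of the class context the method above assumes: the
# attribute names (args, detector, model3D, eyemask) are taken from the
# method body, but this constructor is a guess, not the original code.
class Frontalizer:
    def __init__(self, args):
        self.args = args
        self.detector = dlib.get_frontal_face_detector()
        self.model3D = front.ThreeD_Model(
            os.path.join(args.resource_dir,
                         'frontalization_models/model3Ddlib.mat'),
            'model_dlib')
        self.eyemask = np.asarray(io.loadmat(
            os.path.join(args.resource_dir,
                         'frontalization_models/eyemask.mat'))['eyemask'])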
Example #6
def demo():
    img = io.imread(image_path)
    # detect the facial landmarks
    time3 = time.time()
    lmarks = feature_detection.get_landmarks(img, detector, predictor)
    time4 = time.time()
    if len(lmarks):
        Pose_Para = PE.poseEstimation(img, lmarks)
        time5 = time.time()
    else:
        print('No face detected!')
        return 0

    # time1/time2 presumably bracket the dlib model load earlier in the script
    print('Dlib model load time:', time2 - time1,
          'Landmark detection time:', time4 - time3,
          'Pose estimation time:', time5 - time4)
    print(np.array(Pose_Para) * 180 / math.pi)
def front_database_build(name, data_path, work_path, eyemask):
    '''
    Given a list of subject names in -name-, the path from which to read the
    images, the destination folder in which to build the database, and the
    eyemask used when frontalizing the faces, this routine reads the images
    from data_path and saves each frontalized face under work_path, with one
    folder per individual.
    '''
    for x in name[:]:
        pathdir = os.path.join(work_path, x)

        #print pathdir #for debug purposes
        make_sure_path_exists(pathdir)
        search_filename = x + "*.pgm"
        search_location = os.path.join(data_path, search_filename)
        #print search_location #for debug purposes

        for filepath in glob.glob(search_location):

            # Extract the filename from the filepath found at the search location
            tail = os.path.basename(filepath)
            #print tail #for debug purposes
            out_name = tail + 'fr.pgm'

            # For MIT-CBCL dataset file naming, break name into the image data provided in filename
            # 'subject' - 'face rotation' - etc
            file_data = re.split(r"[_]", tail)
            #print file_data #for debug purposes

            # angular limit for rotated-face frontalization
            face_rot = -26

            # process only the images whose rotation angle exceeds face_rot
            if os.path.isfile(filepath) and face_rot < float(file_data[1]):

                img = cv2.imread(filepath, 1)
                # extract landmarks from the query image
                # list containing a 2D array with points (x, y) for each face detected in the query image
                lmarks = feature_detection.get_landmarks(img)
                # perform camera calibration according to the first face detected
                # (model3D is assumed to be loaded at module level)
                proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                    model3D, lmarks[0])

                # perform frontalization
                frontal_raw, frontal_sym = frontalize.frontalize(
                    img, proj_matrix, model3D.ref_U, eyemask)
                #shutil.copy(filepath, pathdir)
                #write frontal_sym to image file in pathdir location
                cv2.imwrite(os.path.join(pathdir, out_name), frontal_sym)
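
# Usage sketch (illustrative assumption; the subject prefixes and paths are
# hypothetical, and model3D must already be loaded at module level):
eyemask = np.asarray(io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
subjects = ['0001', '0002']   # filename prefixes searched as '<subject>*.pgm'
front_database_build(subjects, 'data/mit_cbcl', 'work/frontalized', eyemask)
# afterwards, work/frontalized/0001/ holds the frontalized faces for
# subject 0001, one '<original name>fr.pgm' per accepted input image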
Example #8
def demo():
    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    # load query image
    # img2 = cv2.imread("test.jpg", 1)
    img = cv2.imread(
        "/home/jordan/PycharmProjects/Emotion_Analysis/Facial_Expression_recog/data/test_photo.jpg",
        1)
    # shrink the image; cv2.resize expects (width, height)
    img = cv2.resize(img, (img.shape[1] // 3, img.shape[0] // 3))
    # detected_object = mouthdetection.findmouth(img, haar_face, haar_mouth)[0]
    # img = smile_detection.crop(detected_object, img, extended = True)
    cv2.imshow("", img)
    cv2.waitKey(5000)
    img = img.astype(np.uint8)
    # print img.shape
    plt.title('Query Image')
    plt.imshow(img[:, :, ::-1])
    # extract landmarks from the query image
    # list containing a 2D array with points (x, y) for each face detected in the query image
    lmarks = feature_detection.get_landmarks(img)
    plt.figure()
    plt.title('Landmarks Detected')
    plt.imshow(img[:, :, ::-1])
    plt.scatter(lmarks[0][:, 0], lmarks[0][:, 1])
    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])
    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization
    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    plt.figure()
    plt.title('Frontalized no symmetry')
    plt.imshow(frontal_raw[:, :, ::-1])
    plt.figure()
    plt.title('Frontalized with soft symmetry')
    plt.imshow(frontal_sym[:, :, ::-1])
    plt.show()
Example #9
def demo(path):

    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    img = cv2.imread(path, 1)

    lmarks = feature_detection.get_landmarks(img)

    if len(lmarks) > 0:
        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
            model3D, lmarks[0])

        eyemask = np.asarray(
            io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

        frontal_raw, frontal_sym = frontalize.frontalize(
            img, proj_matrix, model3D.ref_U, eyemask)
        # overwrites the input image in place
        cv2.imwrite(path, frontal_raw)
Example #10
def demo(image):
    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')
    img = cv2.resize(image, (250, 250), interpolation=cv2.INTER_LINEAR)

    lmarks, f = feature_detection.get_landmarks(img)

    if not f:
        return False, 1

    # perform camera calibration according to the first face detected
    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
        model3D, lmarks[0])

    print(proj_matrix)

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])
    # perform frontalization

    frontal_raw, frontal_sym = frontalize.frontalize(img, proj_matrix,
                                                     model3D.ref_U, eyemask)
    '''
    plt.figure()
    plt.title('Image frontalized(Before symmetry)')
    plt.imshow(frontal_raw[:,:,::-1].astype('uint8'))
    plt.figure()
    plt.title('Image frontalized(After symmetry)')
    plt.imshow(frontal_sym[:,:,::-1].astype('uint8'))
    '''
    x, y, z = grammer.get_angle(rmat)
    # print a rough estimate of the head rotation angles
    print('Rotation angles: x: {}, y: {}, z: {}'.format(x, y, z))
    image_output = frontal_sym.astype('uint8')
    return True, image_output
def get_def_pts(
        image,
        predictor_path='dlib_models/shape_predictor_68_face_landmarks.dat',
        model3D_path='frontalization_models/model3Ddlib.mat',
        eyemask_path='frontalization_models/eyemask.mat',
        resize=False):

    list_def_face = []

    for f_name in sorted(os.listdir('data')):
        if f_name.startswith('def_face'):
            list_def_face.append(f_name)

    points_list = []
    print(image)
    img = cv2.imread(image, 1)

    height, width, layers = img.shape
    if resize:
        new_h = round(height / 3)
        new_w = round(width / 3)
        img = cv2.resize(img, (new_w, new_h))

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    lmarks = feature_detection.get_landmarks(img, detector, predictor)
    points_list = [[x, y] for x, y in lmarks[0]]

    # save the default points once per stored default-face file
    for num, f_name in enumerate(list_def_face):
        with shelve.open('extracted_data/default_points_{}.txt'.format(num),
                         'c') as db:
            db['default_face'] = points_list

    return points_list
Example #12
def demo():
    nSub = opts.getint('general', 'nTotSub')
    fileList, outputFolder = myutil.parse(sys.argv)
    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    ## Preloading all the models for speed
    allModels = myutil.preload(this_path, pose_models_folder, pose_models,
                               nSub)

    for f in fileList:
        if '#' in f:  #skipping comments
            continue
        splitted = f.split(',')
        image_key = splitted[0]
        image_path = splitted[1]
        image_landmarks = splitted[2]
        img = cv2.imread(image_path, 1)
        if image_landmarks != "None":
            lmark = np.loadtxt(image_landmarks)
            lmarks = []
            lmarks.append(lmark)
        else:
            print('> Detecting landmarks')
            lmarks = feature_detection.get_landmarks(img, this_path)

        if len(lmarks) != 0:
            ## Copy back original image and flipping image in case we need
            ## This flipping is performed using all the model or all the poses
            ## To refine the estimation of yaw. Yaw can change from model to model...
            img_display = img.copy()
            img, lmarks, yaw = myutil.flipInCase(img, lmarks, allModels)
            # 'newModels' is presumably a module-level setting in the original script
            listPose = myutil.decidePose(yaw, opts, newModels)
            ## Looping over the poses
            for poseId in listPose:
                posee = pose_models[poseId]
                ## Looping over the subjects
                for subj in range(1, nSub + 1):
                    pose = posee + '_' + str(subj).zfill(2) + '.mat'
                    print('> Looking at file: ' + image_path + ' with ' + pose)
                    # load detections performed by dlib library on 3D model and Reference Image
                    print("> Using pose model in " + pose)
                    ## Indexing the right model instead of loading it each time from memory.
                    model3D = allModels[pose]
                    eyemask = model3D.eyemask
                    # perform camera calibration according to the first face detected
                    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                        model3D, lmarks[0])
                    ## We use eyemask only for frontal
                    if not myutil.isFrontal(pose):
                        eyemask = None
                    ##### Main part of the code: doing the rendering #############
                    rendered_raw, rendered_sym, face_proj, background_proj, temp_proj2_out_2, sym_weight = renderer.render(img, proj_matrix,\
                                                                                             model3D.ref_U, eyemask, model3D.facemask, opts)
                    ########################################################

                    if myutil.isFrontal(pose):
                        rendered_raw = rendered_sym
                    ## Cropping if required by crop_models
                    rendered_raw = myutil.cropFunc(pose, rendered_raw,
                                                   crop_models[poseId])
                    ## Resizing if required
                    if resizeCNN:
                        rendered_raw = cv2.resize(
                            rendered_raw, (cnnSize, cnnSize),
                            interpolation=cv2.INTER_CUBIC)
                    ## Saving if required
                    if opts.getboolean('general', 'saveON'):
                        subjFolder = outputFolder + '/' + image_key.split(
                            '_')[0]
                        myutil.mymkdir(subjFolder)
                        savingString = subjFolder + '/' + image_key + '_rendered_' + pose[
                            8:-7] + '_' + str(subj).zfill(2) + '.jpg'
                        cv2.imwrite(savingString, rendered_raw)

                    ## Plotting if required
                    if opts.getboolean('general', 'plotON'):
                        myutil.show(img_display, img, lmarks, rendered_raw, \
                        face_proj, background_proj, temp_proj2_out_2, sym_weight)
        else:
            print('> Landmark not detected for this image...')
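
# Sketch of the input list this demo expects (an assumption inferred from the
# parsing above): each non-comment line is 'image_key,image_path,landmark_file',
# where the landmark file may be the literal string None. The paths below are
# hypothetical.
with open('input_list.txt', 'w') as f:
    f.write('# key, image path, landmark file (or None)\n')
    f.write('subj01_a,images/subj01_a.jpg,landmarks/subj01_a.txt\n')
    f.write('subj02_a,images/subj02_a.jpg,None\n')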
Example #13
def get_video_points(
        video_file,
        predictor_path='dlib_models/shape_predictor_68_face_landmarks.dat',
        model3D_path='frontalization_models/model3Ddlib.mat',
        eyemask_path='frontalization_models/eyemask.mat',
        mode='face',
        resize=False):

    time_list = []
    points_list = []
    points_list_sym = []
    points_list_raw = []

    model3D = frontalize.ThreeD_Model(model3D_path, 'model_dlib')
    eyemask = np.asarray(io.loadmat(eyemask_path)['eyemask'])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    cap = cv2.VideoCapture(video_file)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print('fps=', fps)

    # width = 320
    # height = 320

    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # video_sym = cv2.VideoWriter('video_sym.avi', fourcc, fps, (width, height))
    # video_raw = cv2.VideoWriter('video_raw.avi', fourcc, fps, (width, height))

    while True:
        try:

            ret, frame = cap.read()
            if not ret:
                print("end of video stream")
                break

            msec = cap.get(cv2.CAP_PROP_POS_MSEC)
            print(msec / 1000)

            img = frame
            iheight, iwidth, layers = img.shape

            #resize
            if resize:
                new_h = round(iheight / 3)
                new_w = round(iwidth / 3)
                img = cv2.resize(img, (new_w, new_h))

            lmarks = feature_detection.get_landmarks(img, detector, predictor)

            if len(lmarks) != 1:
                continue

            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            frontal_raw, frontal_sym = frontalize.frontalize(
                img, proj_matrix, model3D.ref_U, eyemask)

            new_img_sym = np.round(frontal_sym).astype(np.uint8)
            new_img_raw = np.round(frontal_raw).astype(np.uint8)
            lmarks_sym = feature_detection.get_landmarks(
                new_img_sym, detector, predictor)
            if len(lmarks_sym) != 1:
                # skip frames where the frontalized image yields no single face
                continue
            temp_pts = [[x, y] for x, y in lmarks_sym[0]]

            if mode == 'mouth':
                # keep the nose tip (point 33) plus the mouth points (48-67)
                temp_pts = temp_pts[33:34] + temp_pts[48:]

            time_list.append(msec)
            points_list_sym.append(temp_pts)

            # video_sym.write(new_img_sym)
            # video_raw.write(new_img_raw)

        except ValueError as e:
            print(e)
            break

    cap.release()
    points_list = list(zip(time_list, points_list_sym))

    with shelve.open(
            'extracted_data/' + video_file.split('/')[-1].split('.')[0] +
            '.txt', 'c') as db:
        for i, x in enumerate(points_list):
            db[str(i)] = x

    return points_list
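
# Usage sketch (illustrative assumption; the video path is hypothetical):
# each shelve entry maps an index to a (milliseconds, points) pair, mirroring
# how get_video_points stores its results above.
points = get_video_points('videos/speaker.mp4', mode='mouth')
with shelve.open('extracted_data/speaker.txt', 'r') as db:
    msec, pts = db['0']   # first frame in which a single face was detected
    print('t = {:.2f}s, {} points'.format(msec / 1000, len(pts)))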
Example #14
def myfrontalize(X, limit=0):
    count = 0

    if limit == 0:
        limit = X.shape[0]

    print("Total Images: ", limit)

    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()

    # load detections performed by dlib library on 3D model and Reference Image
    model3D = frontalize.ThreeD_Model(
        this_path + "/frontalization_models/model3Ddlib.mat", 'model_dlib')

    # load mask to exclude eyes from symmetry
    eyemask = np.asarray(
        io.loadmat('frontalization_models/eyemask.mat')['eyemask'])

    for i in range(limit):
        print("\rImages Completed: {0}".format(i), end='', flush=True)

        # cast img to type int for cv2
        img = X[i, :, :, 0].astype(np.uint8)

        # create a color version for frontalizer stuffs
        c_img = np.copy(img)
        c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)

        # extract landmarks from the query image
        # list containing a 2D array with points (x, y) for each face detected in the query image
        lmarks = feature_detection.get_landmarks(c_img)
        if type(lmarks) is np.ndarray:

            # perform camera calibration according to the first face detected
            proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                model3D, lmarks[0])

            # perform frontalization and convert result to grayscale
            frontal_raw, frontal_sym = frontalize.frontalize(
                c_img, proj_matrix, model3D.ref_U, eyemask)
            temp = cv2.cvtColor(frontal_raw, cv2.COLOR_BGR2GRAY)

            # find the nonzero bounding box and crop to remove unnecessary black space from the edges
            temp_mask = cv2.findNonZero(temp)
            t_x, t_y, t_w, t_h = cv2.boundingRect(temp_mask)
            t_bbox = temp[t_y:t_y + t_h, t_x:t_x + t_w]

            # resize the cropped image to the appropriate dimensions for network
            t_bbox = cv2.resize(t_bbox, dsize=(48, 48))
            t_bbox = np.resize(t_bbox, (48, 48, 1))
            X[i] = t_bbox.astype(np.float32)

            count += 1

    print()
    print('{} images out of {} were frontalized.'.format(count, limit))
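
# Usage sketch (illustrative assumption): X is a batch of 48x48 grayscale
# faces shaped (N, 48, 48, 1), e.g. an FER-style array saved beforehand;
# myfrontalize replaces each successfully frontalized face in X in place.
X = np.load('fer_faces.npy')   # hypothetical (N, 48, 48, 1) array
myfrontalize(X, limit=100)     # frontalize the first 100 images
np.save('fer_faces_frontalized.npy', X)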
def face_detect(image_file_path, image_save_path):
    # check for dlib's saved weights for face landmark detection;
    # if the check fails, download and extract them manually from
    # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2

    # load detections performed by dlib library on 3D model and Reference Image
    # load query image
    #print image_file_path
    img = cv2.imread(image_file_path, 1)

    (org_h, org_w, org_channels) = img.shape  # img.shape is (height, width, channels)

    check.check_dlib_landmark_weights()
    lmarks = feature_detection.get_landmarks(img)

    if lmarks.shape[0] == 0:
        return 0
    else:
        print(image_save_path)
        if not os.path.exists(image_save_path):
            os.makedirs(image_save_path)
        for num_face in range(0, lmarks.shape[0]):
            face_landmarks = lmarks[num_face, :, :]
            print "Detect face %d out of total %d faces" % (
                (num_face + 1), lmarks.shape[0])
            print "start to align face"
            w = 256
            h = 256

            # np.int was removed from NumPy; plain int() behaves the same here
            eyecornerDst = [(int(0.25 * w), int(h / 3.5)),
                            (int(0.75 * w), int(h / 3.5)),
                            (int(0.5 * w), int(h / 2))]
            eyecornerSrc = [
                (int(face_landmarks[36][0]), int(face_landmarks[36][1])),
                (int(face_landmarks[45][0]), int(face_landmarks[45][1])),
                (int(face_landmarks[30][0]), int(face_landmarks[30][1]))
            ]

            # Apply similarity transformation
            tform = similarityTransform(eyecornerSrc, eyecornerDst)

            img_face_aligned = cv2.warpAffine(img, tform, (w, h))
            #note that x is horizontal, is width, while y is height, is vertical
            #print tform

            width = int((face_landmarks[16][0] - face_landmarks[0][0]) * 1.3)
            height = int((face_landmarks[8][1] - face_landmarks[19][1]) * 1.3)
            if width < 0 or height < 0:
                print("face crooked")
                sys.exit()

            left_top_x = int(face_landmarks[0][0]) - int(
                (int(face_landmarks[16][0]) - int(face_landmarks[0][0]))
                * 0.15)
            left_top_y = int(face_landmarks[19][1]) - int(
                (int(face_landmarks[8][1]) - int(face_landmarks[19][1]))
                * 0.15)
            if left_top_x < 0:
                left_top_x = 0
            if left_top_y < 0:
                left_top_y = 0
            image_new_name_affined = image_save_path + '/A_' + str(
                left_top_x) + '_' + str(left_top_y) + '_' + str(
                    width) + '_' + str(height) + '.jpg'

            resized_image = cv2.resize(img_face_aligned, (256, 256))

            cv2.imwrite(image_new_name_affined, resized_image)

        return 1
def get_landmark_2d(self, im):
    # method from a larger class; 'detector' and 'predictor' are module-level
    # objects in the original file
    return feature_detection.get_landmarks(im, detector, predictor)