Example #1
def demo():
    nSub = opts.getint('general', 'nTotSub')
    fileList, outputFolder = myutil.parse(sys.argv)
    # check for dlib saved weights for face landmark detection
    # if it fails, download and extract it manually from
    # http://sourceforge.net/projects/dclib/files/d.10/shape_predictor_68_face_landmarks.dat.bz2
    check.check_dlib_landmark_weights()
    ## Preloading all the models for speed
    allModels = myutil.preload(this_path, pose_models_folder, pose_models,
                               nSub)

    for f in fileList:
        if '#' in f:  # skipping comment lines
            continue
        image_key, image_path, image_landmarks = f.split(',')[:3]
        img = cv2.imread(image_path, 1)
        if image_landmarks != "None":
            lmarks = [np.loadtxt(image_landmarks)]
        else:
            print('> Detecting landmarks')
            lmarks = feature_detection.get_landmarks(img, this_path)

        if len(lmarks) != 0:
            ## Keep a copy of the original image for display and flip the input if needed.
            ## The flip check is run against all the models/poses to refine the yaw
            ## estimate, since yaw can change from model to model...
            img_display = img.copy()
            img, lmarks, yaw = myutil.flipInCase(img, lmarks, allModels)
            listPose = myutil.decidePose(yaw, opts, newModels)
            ## Looping over the poses
            for poseId in listPose:
                posee = pose_models[poseId]
                ## Looping over the subjects
                for subj in range(1, nSub + 1):
                    pose = posee + '_' + str(subj).zfill(2) + '.mat'
                    print('> Looking at file: ' + image_path + ' with ' + pose)
                    # load the detections performed by the dlib library on the 3D model and the reference image
                    print("> Using pose model in " + pose)
                    ## Index the right preloaded model instead of reloading it each time.
                    model3D = allModels[pose]
                    eyemask = model3D.eyemask
                    # perform camera calibration according to the first face detected
                    proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                        model3D, lmarks[0])
                    ## We use the eyemask only for frontal poses
                    if not myutil.isFrontal(pose):
                        eyemask = None
                    ##### Main part of the code: doing the rendering #############
                    rendered_raw, rendered_sym, face_proj, background_proj, \
                        temp_proj2_out_2, sym_weight = renderer.render(
                            img, proj_matrix, model3D.ref_U, eyemask,
                            model3D.facemask, opts)
                    ########################################################

                    if myutil.isFrontal(pose):
                        rendered_raw = rendered_sym
                    ## Cropping if required by crop_models
                    rendered_raw = myutil.cropFunc(pose, rendered_raw,
                                                   crop_models[poseId])
                    ## Resizing if required
                    if resizeCNN:
                        rendered_raw = cv2.resize(
                            rendered_raw, (cnnSize, cnnSize),
                            interpolation=cv2.INTER_CUBIC)
                    ## Saving if required
                    if opts.getboolean('general', 'saveON'):
                        subjFolder = outputFolder + '/' + image_key.split('_')[0]
                        myutil.mymkdir(subjFolder)
                        savingString = subjFolder + '/' + image_key + '_rendered_' + \
                            pose[8:-7] + '_' + str(subj).zfill(2) + '.jpg'
                        cv2.imwrite(savingString, rendered_raw)

                    ## Plotting if required
                    if opts.getboolean('general', 'plotON'):
                        myutil.show(img_display, img, lmarks, rendered_raw,
                                    face_proj, background_proj,
                                    temp_proj2_out_2, sym_weight)
        else:
            print('> Landmark not detected for this image...')
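
As a rough sketch of the input that demo() parses above (all keys and paths below are hypothetical): each non-comment line of fileList carries image_key,image_path,image_landmarks, where the landmark field is either a file readable by np.loadtxt or the literal string None to fall back on dlib detection, and the key prefix before the first '_' becomes the output sub-folder.

# Minimal sketch of an input list for demo(); keys and paths are hypothetical.
with open('input_list.txt', 'w') as fh:
    # key, image path, landmark file (e.g. the 68 dlib points, readable by np.loadtxt)
    fh.write('subj01_img001,/data/img001.jpg,/data/img001_lmarks.txt\n')
    # "None" in the third field makes demo() run the dlib landmark detector instead
    fh.write('subj02_img001,/data/img002.jpg,None\n')
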
Example #2
def render_fpn(inputFile, output_pose_db, outputFolder):
    ## Opening FPN pose db
    pose_env = lmdb.open(output_pose_db, readonly=True)
    pose_cnn_lmdb = pose_env.begin()
    ## looping over images
    with open(inputFile, 'r') as csvfile:
        lines = csvfile.readlines()
        for lin in lines:
            ### each line: image_key, image_path
            image_key = lin.split(',')[0]
            if 'flip' in image_key:
                continue

            image_path = lin.split(',')[-1].rstrip('\n')
            img = cv2.imread(image_path, 1)
            # lmdb keys must be bytes under Python 3
            pose_Rt_raw = pose_cnn_lmdb.get(image_key.encode())
            pose_Rt_flip_raw = pose_cnn_lmdb.get((image_key + '_flip').encode())

            if pose_Rt_raw is not None:
                pose_Rt = np.frombuffer(pose_Rt_raw, np.float32)
                pose_Rt_flip = np.frombuffer(pose_Rt_flip_raw, np.float32)

                yaw = myutil.decideSide_from_db(img, pose_Rt, allModels)

                if yaw < 0:  # Flip image and get the corresponding pose
                    img = cv2.flip(img, 1)
                    pose_Rt = pose_Rt_flip

                listPose = myutil.decidePose(yaw, opts, newModels)
                ## Looping over the poses
                for poseId in listPose:
                    posee = pose_models[poseId]
                    ## Looping over the subjects
                    for subj in [10]:
                        pose = posee + '_' + str(subj).zfill(2) + '.mat'
                        print('> Looking at file: ' + image_path + ' with ' + pose)
                        # load the detections performed by the dlib library on the 3D model and the reference image
                        print("> Using pose model in " + pose)
                        ## Index the right preloaded model instead of reloading it each time.
                        model3D = allModels[pose]
                        eyemask = model3D.eyemask
                        # perform camera calibration according to the first face detected
                        proj_matrix, camera_matrix, rmat, tvec = calib.estimate_camera(
                            model3D, pose_Rt, pose_db_on=True)
                        ## We use the eyemask only for frontal poses
                        if not myutil.isFrontal(pose):
                            eyemask = None
                        ##### Main part of the code: doing the rendering #############
                        rendered_raw, rendered_sym, face_proj, background_proj, \
                            temp_proj2_out_2, sym_weight = renderer_core.render(
                                img, proj_matrix, model3D.ref_U, eyemask,
                                model3D.facemask, opts)
                        ########################################################

                        if myutil.isFrontal(pose):
                            rendered_raw = rendered_sym
                        ## Cropping if required by crop_models
                        #rendered_raw = myutil.cropFunc(pose,rendered_raw,crop_models[poseId])
                        ## Resizing if required
                        #if resizeCNN:
                        #    rendered_raw = cv2.resize(rendered_raw, ( cnnSize, cnnSize ), interpolation=cv2.INTER_CUBIC )
                        ## Saving if required
                        if opts.getboolean('general', 'saveON'):
                            subjFolder = outputFolder + '/' + image_key.split('_')[0]
                            myutil.mymkdir(subjFolder)
                            savingString = subjFolder + '/' + image_key + '_rendered_' + \
                                pose[8:-7] + '_' + str(subj).zfill(2) + '.jpg'
                            cv2.imwrite(savingString, rendered_raw)
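
As a rough illustration of the pose database layout render_fpn() reads (the db path and key below are hypothetical): each image key maps to a float32-encoded pose vector, and the mirrored image's pose is stored under the same key with a '_flip' suffix.

import lmdb
import numpy as np

# Minimal sketch: open the FPN pose db read-only and decode one entry plus its flipped twin.
# get() returns None for missing keys, so real code should check before decoding.
with lmdb.open('/data/fpn_pose_db', readonly=True) as pose_env:
    with pose_env.begin() as txn:
        pose_Rt = np.frombuffer(txn.get(b'subj01_img001'), np.float32)
        pose_Rt_flip = np.frombuffer(txn.get(b'subj01_img001_flip'), np.float32)
        print(pose_Rt.shape, pose_Rt_flip.shape)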