def get_3d_pkl_lrw(
    pkl,
    root,
    bbb=0
):
    """Reconstruct a 3D face mesh for the key frame of every item in a
    pickled LRW list.

    Each pickle entry holds the video/frame path prefix in cell 0 and the
    key-frame number in the last cell.  For every entry, the key frame is
    loaded, PRNet landmarks are written to '<prefix>_prnet.npy' and a
    colored mesh to '<prefix>_original.obj'.

    `root` is accepted for interface compatibility but unused here.
    """
    # ---- init PRN (dlib used for face detection)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    with open(pkl, "rb") as fh:
        entries = pickle.load(fh)

    total = len(entries)
    # Select the slice of work assigned to worker index `bbb`.
    # NOTE(review): with factor 1, bbb=0 covers the whole list and any
    # bbb >= 1 selects an empty slice — confirm the intended shard factor.
    entries = entries[int(total * 1 * (bbb)):int(total * 1 * (bbb + 1))]

    for idx, entry in enumerate(entries):
        print(idx)
        print(entry)
        # Skip entries whose mesh was already written.
        if os.path.exists(entry[0] + '_original.obj'):
            continue

        frame_number = entry[-1]
        frame_path = entry[0] + '_%05d.png' % frame_number
        print(frame_path)

        frame_bgr = cv2.imread(frame_path)
        image = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)

        [h, w, c] = image.shape

        # Regress the position map; returns None when no face is found.
        pos = prn.process(image)

        image = image / 255.
        if pos is None:
            print('+++++')
            continue

        # Landmarks, with y flipped so the origin is at the image bottom.
        kpt = prn.get_landmarks(pos)
        kpt[:, 1] = h - kpt[:, 1]
        np.save(entry[0] + '_prnet.npy', kpt)

        # Dense 3D vertices, y flipped for the OBJ coordinate convention.
        vertices = prn.get_vertices(pos)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # Per-vertex colors sampled from the (0-1) RGB image.
        colors = prn.get_colors(image, vertices)

        # Save the 3D face (can be opened with meshlab).
        write_obj_with_colors(entry[0] + '_original.obj', save_vertices,
                              prn.triangles, colors)
Ejemplo n.º 2
0
def prnetSwap(image, ref_image, numFaces):
    """Swap face texture(s) between images using PRNet.

    With numFaces == 1, the single face in `image` is swapped with the
    single face in `ref_image` (via prnetOne).  Otherwise the two faces
    detected in `image` are swapped with each other (via two
    prnetSwapOneFace passes).

    Returns:
        (posList, output) on success, where posList holds the two PRNet
        position maps; (None, image) when either face detection fails.
    """
    prn = PRN(is_dlib=True, prefix='Code/prnet/')
    [h, w, _] = image.shape

    if numFaces == 1:
        # One face in each image: swap between `image` and `ref_image`.
        pos1 = prn.process(image, 0)
        pos2 = prn.process(ref_image, 0)
        posList = [pos1, pos2]

        # A None position map means detection failed for that image.
        # (The original also tested `posList is None` — impossible for a
        # freshly built list — and had an unreachable len(posList) != 2
        # branch; both removed as dead code.)
        if pos1 is None or pos2 is None:
            return None, image

        output = prnetOne(prn, image, ref_image, posList[0], posList[1], h,
                          w)
        return posList, output

    else:
        # Two faces in `image`: swap them with each other, one pass per
        # direction.
        pos1 = prn.process(image, 0)
        pos2 = prn.process(image, 1)
        posList = [pos1, pos2]

        if pos1 is None or pos2 is None:
            return None, image

        output = prnetSwapOneFace(prn, image, ref_image, posList[0],
                                  posList[1], h, w)
        output = prnetSwapOneFace(prn, output, ref_image, posList[1],
                                  posList[0], h, w)
        return posList, output
def get_3d_single(video_path=None, target_id=None, img_path=None):
    """Reconstruct a 3D face mesh from a video frame or a still image.

    Exactly one of `video_path` (with `target_id`, the 0-based frame index
    to extract) or `img_path` must be given.  Saves '<prefix>_prnet.npy'
    (landmarks) and '<prefix>_original.obj' (colored mesh) next to the
    input, plus the extracted frame as a PNG for the video case.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    if video_path is not None:
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
        cap = cv2.VideoCapture(video_path)
        # Decode up to the target frame, then keep it.
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
    elif img_path is not None:
        target_frame = cv2.imread(img_path)
    target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

    image = target_frame
    # read image
    [h, w, c] = image.shape

    pos = prn.process(image)  # use dlib to detect face

    image = image / 255.
    # Fix: prn.process returns None when no face is found; the original
    # would then crash inside get_landmarks(None).
    if pos is None:
        print('no face detected')
        return
    # landmark (y flipped so the origin is at the bottom of the image)
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]
    if video_path is not None:
        np.save(video_path[:-4] + '_prnet.npy', kpt)
    else:
        np.save(img_path[:-4] + '_prnet.npy', kpt)
    # 3D vertices
    vertices = prn.get_vertices(pos)
    # y flipped for the OBJ coordinate convention
    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
    # corresponding colors
    colors = prn.get_colors(image, vertices)

    if video_path is not None:
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  #save 3d face(can open with meshlab)
        print('The generated 3d mesh model is stored in ' + video_path[:-4] +
              '_original.obj')
    else:
        write_obj_with_colors(img_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  #save 3d face(can open with meshlab)
        print('The generated 3d mesh model is stored in ' + img_path[:-4] +
              '_original.obj')
Ejemplo n.º 4
0
def main(args):
    """Pre-process every *.jpg/*.png in args.inputDir with PRNet and save
    the (possibly rescaled) image plus the crop transform to
    args.outputDir."""
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):
        # file name without extension
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape
        if args.isDlib:
            # Keep dlib detection fast by capping the longest side at 1000 px.
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            cropped_image, tform, resolution_np = prn.process(
                image)  # use dlib to detect face
        else:  #without dlib should set the parameters of imageinfo
            # Fix: compare height against width (shape[0] vs shape[1]); the
            # original compared width against the channel count
            # (shape[1] == shape[2]), matching the parallel code elsewhere
            # in this file.
            if image.shape[0] == image.shape[1]:
                # FIXME(review): resize() returns a single array, so this
                # 3-way unpack looks wrong — confirm against prn.process().
                cropped_image, tform, resolution_np = resize(image, (256, 256))
                #pos = prn.net_forward(image/255.)             # input image has been cropped to 256x256,and set to be 0-1 values
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                cropped_image, tform, resolution_np = prn.process(image, box)
        imsave(os.path.join(save_folder, name + '_pre_processed.jpg'), image)
        # NOTE(review): tform.txt is overwritten per image — only the last
        # image's transform survives; confirm intended.
        np.savetxt(os.path.join(save_folder, 'tform.txt'), tform)
def get_3d_single_video(
        img_path):  # you need the image path of the most visible frame.
    """Reconstruct a 3D face mesh from one image (the most visible frame
    of a video).

    Saves landmarks to '<prefix>__prnet.npy' and a colored mesh to
    '<prefix>__original.obj', where the prefix is img_path with its last
    11 characters stripped.
    """
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    print(img_path)
    target_frame = cv2.imread(img_path)
    target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

    image = target_frame
    # read image
    [h, w, c] = image.shape

    pos = prn.process(image)  # use dlib to detect face

    image = image / 255.
    # Fix: the original printed 'No pos' but fell through into
    # get_landmarks(None) and crashed; bail out instead.
    if pos is None:
        print('No pos')
        return

    # landmark (y flipped so the origin is at the bottom of the image)
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]

    np.save(img_path[:-11] + '__prnet.npy', kpt)
    # 3D vertices
    vertices = prn.get_vertices(pos)

    # y flipped for the OBJ coordinate convention
    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

    # corresponding colors
    colors = prn.get_colors(image, vertices)

    write_obj_with_colors(img_path[:-11] + '__original.obj', save_vertices,
                          prn.triangles,
                          colors)  #save 3d face(can open with meshlab)
Ejemplo n.º 6
0
def get_3d(bbb):
    """Reconstruct a 3D face mesh for the key frame of every item in shard
    `bbb` (one tenth) of the list pickled at root/txt/front_rt.pkl.

    NOTE(review): `root` is read as a module-level global here (the sibling
    get_3d_pkl_lrw takes it as a parameter) — confirm it is defined before
    calling.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    # ------------- load data
    # frame_id = "test_video/id00419/3U0abyjM2Po/00024"
    # mesh_file = os.path.join(root, frame_id + ".obj")
    # rt_file = os.path.join(root, frame_id + "_sRT.npy")
    # image_path

    # _file = open(os.path.join(root, 'txt',  "front_rt.pkl"), "rb")
    # data = pickle._Unpickler(_file)
    # data.encoding = 'latin1'
    # data = data.load()
    _file = open(os.path.join(root, 'txt', "front_rt.pkl"), "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)
    print(len(data))
    # take the bbb-th tenth of the work list (simple sharding across workers)
    data = data[int(gg * 0.1 * bbb):int(gg * 0.1 * (bbb + 1))]
    for kk, item in enumerate(data):
        print(kk)

        target_id = item[-1]
        video_path = os.path.join(root, 'unzip', item[0] + '.mp4')
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        # skip videos whose mesh already exists
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
            continue
        cap = cv2.VideoCapture(video_path)
        # decode up to the key frame, then keep it
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

        image = target_frame
        # read image
        [h, w, c] = image.shape

        pos = prn.process(image)  # use dlib to detect face

        image = image / 255.
        if pos is None:
            continue

        # landmark
        # NOTE(review): y flipped against hard-coded 224 here (siblings use
        # the frame height h) — confirm the key frames are 224 px tall.
        kpt = prn.get_landmarks(pos)
        kpt[:, 1] = 224 - kpt[:, 1]

        np.save(video_path[:-4] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        # save_vertices, p = frontalize(vertices)
        # np.save(video_path[:-4] + '_p.npy', p)
        # if os.path.exists(video_path[:-4] + '.obj'):
        #     continue
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # corresponding colors
        colors = prn.get_colors(image, vertices)

        # print (colors.shape)
        # print ('=========')
        # cv2.imwrite('./mask.png', colors * 255)
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  # save 3d face(can open with meshlab)
Ejemplo n.º 7
0
def get_3d_single(video_path=None,
                  target_id=None,
                  img_path=None,
                  device_id='3'):
    """Run PRNet on every frame of `video_path`, saving per-frame landmarks
    ('_%05d_prnet.npy') and colored meshes ('_%05d_original.obj').

    `target_id` is recomputed from the video via count_frames, so the
    argument value is only a placeholder; `img_path` is used for output
    naming in the (currently dead) no-video branch.
    """
    # ---- init PRN
    target_id = count_frames(video_path)  # process every frame of the video
    os.environ['CUDA_VISIBLE_DEVICES'] = device_id  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    # Fix: the original body referenced an undefined `target_path`
    # (NameError on the first iteration); outputs are named after the
    # input video.
    target_path = video_path

    cap = cv2.VideoCapture(video_path)
    for i in range(target_id):
        ret, frame = cap.read()
        print(target_path[:-4] + '_%05d.png' % i)
        cv2.imwrite(target_path[:-4] + '_%05d.png' % i, frame)
        target_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = target_frame

        [h, w, c] = image.shape

        pos = prn.process(image)
        image = image / 255
        # Fix: skip frames where no face was detected instead of crashing
        # in get_landmarks(None).
        if pos is None:
            continue

        kpt = prn.get_landmarks(pos)
        # NOTE(review): y flipped against hard-coded 224 (siblings use the
        # frame height h) — confirm the frames are 224 px tall.
        kpt[:, 1] = 224 - kpt[:, 1]
        if video_path is not None:
            print(target_path[:-4] + '_%05d' % i + '_prnet.npy')
            np.save(target_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)
        else:
            np.save(img_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)

        vertices = prn.get_vertices(pos)
        # y flipped for the OBJ coordinate convention
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # corresponding colors
        colors = prn.get_colors(image, vertices)

        if video_path is not None:
            write_obj_with_colors(
                target_path[:-4] + '_%05d' % i + '_original.obj',
                save_vertices, prn.triangles,
                colors)  # save 3d face(can open with meshlab)
        else:
            write_obj_with_colors(
                img_path[:-4] + '_%05d' % i + '_original.obj', save_vertices,
                prn.triangles, colors)  # save 3d face(can open with meshlab)
Ejemplo n.º 8
0
def main(args):
    """Batch PRNet demo over args.inputFolder: optionally saves OBJ meshes,
    landmark txt files and pose txt files, and/or shows the results.

    NOTE(review): the isPose/isShow branches read `vertices` and
    `camera_matrix`, which are only assigned when the earlier flags ran —
    e.g. isPose without isObj/isShow raises NameError on `vertices`, and
    isShow without isPose raises on `camera_matrix`; confirm the flag
    combinations callers actually use.
    """
    if args.isShow:
        args.isOpencv = True
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box
    if args.isObj:
        from utils.write import write_obj
    if args.isPose:
        from utils.estimate_pose import estimate_pose

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU

    prn = PRN(is_dlib=args.isDlib, is_opencv=args.isOpencv)

    # ------------- load data
    image_folder = args.inputFolder
    save_folder = args.outputFolder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        # file name without extension
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)

        # the core: regress position map
        # NOTE(review): pos may be None when no face is found — the
        # branches below do not guard against that.
        pos = prn.process(image)  # use dlib to detect face

        if args.isObj or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            # corresponding colors
            colors = prn.get_colors(image, vertices)
            write_obj(os.path.join(save_folder,
                                   name + '.obj'), vertices, colors,
                      prn.triangles)  #save 3d face(can open with meshlab)

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)

        if args.isShow:
            # ---------- Plot
            # NOTE(review): image_pose is computed but never used below.
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
Ejemplo n.º 9
0
            # --- fragment of a larger anti-spoofing loop; the enclosing
            # function header is not visible in this chunk.  `spoof_count`,
            # `total_count`, `live_probability`, `image`, `prn`, `model`,
            # `get_depth_image`, `imsave`, `load_img`, `img_to_array` and
            # `preprocess_input` come from the missing part of the file.
            spoof_probability = spoof_count / total_count

            live_probability = round(live_probability, 1)
            spoof_probability = round(spoof_probability, 1)

            print('live_probability', live_probability)
            print('spoof_probability', spoof_probability)

            # Build a PRNet depth map from the face image (resized to
            # 224x224, clipped to 3 channels).
            image = cv2.resize(image, (224, 224))
            image_shape = np.shape(image)
            print(np.shape(image))
            [h, w, c] = image_shape
            if c > 3:
                image = image[:, :, :3]
            print(np.shape(image))
            # NOTE(review): pos is not checked for None before get_vertices.
            pos = prn.process(image)
            vertices = prn.get_vertices(pos)
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            # print(vertices)
            # Round-trip through disk to feed the depth map to the VGG model.
            imsave('depth_image.jpg', depth_image)
            load_image = load_img('depth_image.jpg', target_size=(224, 224))
            load_image = img_to_array(load_image)

            # add the batch dimension: (1, h, w, channels)
            load_image = load_image.reshape(
                (1, load_image.shape[0], load_image.shape[1],
                 load_image.shape[2]))
            # # prepare the image for the VGG model
            load_image = preprocess_input(load_image)
            # get features
            vgg_prediction = model.predict(load_image, verbose=0)
            print('Prediction VGG', vgg_prediction)
0
def main(args):
    """Build a phase-7 training set from RealSense capture folders.

    For each capture under the mode-specific dataset folder, keeps frames
    whose head pose (PRNet vertices + estimate_pose) differs from every
    previously kept pose by more than SPATIAL_THRESHOLD_DEGREE, then
    writes cropped profile/depth/rs images into a new train folder.

    Relies on module globals: mode, FAKE/REAL/PRINT, p (rtree index
    properties), index, SPATIAL_THRESHOLD_DEGREE, and helpers plot_crop /
    estimate_pose.
    """
    #---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu # GPU number, -1 for CPU
    prn = PRN(is_dlib = args.isDlib)

    if mode == FAKE:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/attack_dataset"
    elif mode == REAL:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/all_dataset"
    elif mode == PRINT:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/all_dataset"

    # Fix: generator.next() was removed in Python 3; use the next() builtin.
    dataset_folder_list = next(os.walk(dataset_folder_path))[1]
    train_folder_path = "/home/wukong/anaconda3/dataset/phase7/train"
    train_folder_list = next(os.walk(train_folder_path))[1]
    for fname in dataset_folder_list:
        source_path = os.path.join(dataset_folder_path, fname)
        hs_warp_folder = os.path.join(source_path, "hs_raw_warp")
        rs_color_folder = os.path.join(source_path, "rs_raw_color")
        rs_depth_folder = os.path.join(source_path, "rs_raw_depth")

        hs_warp_list = sorted(glob(os.path.join(hs_warp_folder, '*.jpg')))
        rs_color_list = sorted(glob(os.path.join(rs_color_folder, '*.jpg')))
        rs_depth_list = sorted(glob(os.path.join(rs_depth_folder, '*.jpg')))

        # skip captures with missing or mismatched frame lists
        if not hs_warp_list or not rs_color_list or not rs_depth_list:
            print("skip ", source_path)
            continue
        elif len(hs_warp_list) != len(rs_color_list) or len(hs_warp_list) != len(rs_depth_list):
            print("skip ", source_path)
            continue
        else:
            pass

        # encode the label suffix for the current mode
        if mode == FAKE:
            new_fname = "1_" + fname[1:] + "_3_1_4"
        elif mode == REAL:
            new_fname = "1_" + fname[1:] + "_1_1_1"
        elif mode == PRINT:
            new_fname = "1_" + fname[1:] + "_0_0_0"

        if new_fname not in train_folder_list:
            new_path = os.path.join(train_folder_path, new_fname)
            os.mkdir(new_path)
            new_depth_path = os.path.join(new_path, "depth")
            os.mkdir(new_depth_path)
            new_profile_path = os.path.join(new_path, "profile")
            os.mkdir(new_profile_path)
            new_rs_path = os.path.join(new_path, "rs")
            os.mkdir(new_rs_path)
            print("create new folder: {}".format(fname))
        else:
            print("skip {}".format(new_fname))
            continue

        # 3D r-tree over the euler poses already kept for this capture
        spatial_coordinate_idx = index.Index(properties=p)
        count_num = 1
        total_num = len(hs_warp_list)
        for j in range(total_num):
            if j % 10 == 0:
                print("has processed {} of {} images".format(j, total_num))

            hs_warp_image = imread(hs_warp_list[j])
            [h, w, c] = hs_warp_image.shape
            if c > 3:
                hs_warp_image = hs_warp_image[:, :, :3]

            # the core: regress position map
            if args.isDlib:
                max_size = max(hs_warp_image.shape[0], hs_warp_image.shape[1])
                if max_size > 1000:
                    hs_warp_image = rescale(hs_warp_image, 1000. / max_size)
                    hs_warp_image = (hs_warp_image * 255).astype(np.uint8)
                hs_pos = prn.process(hs_warp_image)  # use dlib to detect face
            else:
                if hs_warp_image.shape[0] == hs_warp_image.shape[1]:
                    hs_warp_image = resize(hs_warp_image, (256, 256))
                    hs_pos = prn.net_forward(hs_warp_image / 255.)  # input has been cropped to 256x256
                else:
                    box = np.array([0, hs_warp_image.shape[1] - 1, 0, hs_warp_image.shape[0] - 1])  # cropped with bounding box
                    hs_pos = prn.process(hs_warp_image, box)

            hs_warp_image = hs_warp_image / 255.
            if hs_pos is None:
                continue
            hs_vertices = prn.get_vertices(hs_pos)

            camera_matrix, euler_pose = estimate_pose(hs_vertices)
            # check similarity with the nearest previously kept pose
            hit = spatial_coordinate_idx.nearest((euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]), 1, objects=True)
            hit = [i for i in hit]
            if hit:
                nearest_euler_pose = np.array(hit[0].bbox[:3])
                current_euler_pose = np.array(euler_pose)
                dist = np.linalg.norm(current_euler_pose - nearest_euler_pose)
                if dist > SPATIAL_THRESHOLD_DEGREE:
                    print("Get a new euler pose {}".format(euler_pose))
                    spatial_coordinate_idx.insert(0, (euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]))
                else:
                    continue
            else:
                print("First euler_pose: {}".format(euler_pose))
                spatial_coordinate_idx.insert(0, (euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]))

            ##############################################
            # write out the selected frame for this mode
            ##############################################

            if mode == FAKE:
                imsave(os.path.join(new_profile_path, ('%04d' % count_num) + '.jpg'), plot_crop(hs_warp_image, hs_vertices))
                rs_depth_image = imread(rs_depth_list[j])
                imsave(os.path.join(new_depth_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_depth_image, hs_vertices))
            elif mode == PRINT:
                rs_color_image = imread(rs_color_list[j])
                imsave(os.path.join(new_rs_path, ('%04d' % count_num) + '.jpg'), rs_color_image)
            elif mode == REAL:
                rs_color_image = imread(rs_color_list[j])
                [h, w, c] = rs_color_image.shape
                if c > 3:
                    rs_color_image = rs_color_image[:, :, :3]

                # the core: regress position map
                if args.isDlib:
                    max_size = max(rs_color_image.shape[0], rs_color_image.shape[1])
                    if max_size > 1000:
                        rs_color_image = rescale(rs_color_image, 1000. / max_size)
                        rs_color_image = (rs_color_image * 255).astype(np.uint8)
                    rs_pos = prn.process(rs_color_image)  # use dlib to detect face
                else:
                    if rs_color_image.shape[0] == rs_color_image.shape[1]:
                        rs_color_image = resize(rs_color_image, (256, 256))
                        rs_pos = prn.net_forward(rs_color_image / 255.)  # input has been cropped to 256x256
                    else:
                        box = np.array([0, rs_color_image.shape[1] - 1, 0, rs_color_image.shape[0] - 1])  # cropped with bounding box
                        rs_pos = prn.process(rs_color_image, box)

                rs_color_image = rs_color_image / 255.
                if rs_pos is None:
                    continue
                rs_vertices = prn.get_vertices(rs_pos)

                rs_depth_image = imread(rs_depth_list[j])
                imsave(os.path.join(new_profile_path, ('%04d' % count_num) + '.jpg'), plot_crop(hs_warp_image, rs_vertices))
                imsave(os.path.join(new_depth_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_depth_image, rs_vertices))
                imsave(os.path.join(new_rs_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_color_image, rs_vertices))

            count_num += 1
Ejemplo n.º 11
0
def main(_):
    """PRNet demo: visualize sparse/dense alignment and pose either on a
    folder of test images (FLAGS.img_path) or on a live webcam stream
    (FLAGS.use_cam).  Press 's' to save the current result, 'q' to quit."""
    # init
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    set_memory_growth()

    # load PRNet model
    cfg = load_yaml(FLAGS.cfg_path)
    model = PRN(cfg, is_dlib=True)

    # evaluation
    if not FLAGS.use_cam:  # on test-img
        print(
            "[*] Processing on images in {}. Press 's' to save result.".format(
                FLAGS.img_path))
        img_paths = glob.glob(os.path.join(FLAGS.img_path, '*'))
        for img_path in img_paths:
            img = cv2.imread(img_path)
            # NOTE(review): process() is given the file path here but the
            # decoded image array in the webcam branch — confirm this PRN
            # variant accepts both.
            pos = model.process(img_path)
            if pos is None:
                continue

            vertices = model.get_vertices(pos)
            kpt = model.get_landmarks(pos)
            camera_matrix, _ = estimate_pose(vertices)

            # show input plus the three visualizations side by side
            cv2.imshow('Input', img)
            cv2.imshow('Sparse alignment', plot_kpt(img, kpt))
            cv2.imshow('Dense alignment', plot_vertices(img, vertices))
            cv2.imshow('Pose', plot_pose_box(img, camera_matrix, kpt))
            cv2.moveWindow('Input', 0, 0)
            cv2.moveWindow('Sparse alignment', 500, 0)
            cv2.moveWindow('Dense alignment', 1000, 0)
            cv2.moveWindow('Pose', 1500, 0)

            key = cv2.waitKey(0)
            if key == ord('q'):
                exit()
            elif key == ord('s'):
                cv2.imwrite(
                    os.path.join(FLAGS.save_path, os.path.basename(img_path)),
                    plot_kpt(img, kpt))
                print("Result saved in {}".format(FLAGS.save_path))

    else:  # webcam demo
        cap = cv2.VideoCapture(0)
        start_time = time.time()
        count = 1
        while (True):
            _, image = cap.read()

            pos = model.process(image)
            # overlay the instantaneous FPS on the input frame
            fps_str = 'FPS: %.2f' % (1 / (time.time() - start_time))
            start_time = time.time()
            cv2.putText(image, fps_str, (25, 25), cv2.FONT_HERSHEY_DUPLEX,
                        0.75, (0, 255, 0), 2)
            cv2.imshow('Input', image)
            cv2.moveWindow('Input', 0, 0)

            key = cv2.waitKey(1)
            if pos is None:
                # no face this frame: tear down the result windows and keep
                # only the input view alive
                cv2.waitKey(1)
                cv2.destroyWindow('Sparse alignment')
                cv2.destroyWindow('Dense alignment')
                cv2.destroyWindow('Pose')
                if key & 0xFF == ord('q'):
                    break
                continue

            else:
                vertices = model.get_vertices(pos)
                kpt = model.get_landmarks(pos)
                camera_matrix, _ = estimate_pose(vertices)

                result_list = [
                    plot_kpt(image, kpt),
                    plot_vertices(image, vertices),
                    plot_pose_box(image, camera_matrix, kpt)
                ]

                cv2.imshow('Sparse alignment', result_list[0])
                cv2.imshow('Dense alignment', result_list[1])
                cv2.imshow('Pose', result_list[2])
                cv2.moveWindow('Sparse alignment', 500, 0)
                cv2.moveWindow('Dense alignment', 1000, 0)
                cv2.moveWindow('Pose', 1500, 0)

                if key & 0xFF == ord('s'):
                    # save the concatenated visualizations plus the raw frame
                    image_name = 'prnet_cam_' + str(count)
                    save_path = FLAGS.save_path

                    cv2.imwrite(
                        os.path.join(save_path, image_name + '_result.jpg'),
                        np.concatenate(result_list, axis=1))
                    cv2.imwrite(
                        os.path.join(save_path, image_name + '_image.jpg'),
                        image)
                    count += 1
                    print("Result saved in {}".format(FLAGS.save_path))

                if key & 0xFF == ord('q'):
                    break
# ---- batch depth-map generation script (top-level code).
# NOTE(review): `prn` and `DepthImage` are defined earlier in the original
# file (not visible in this chunk).
image_folder = '/home/chang/dataset/A_face1'
save_folder = '/home/chang/dataset/A_depth'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

first_dir = os.listdir(image_folder)
for img in first_dir:
    # absolute path of the second-level directory entry
    if img.split(".")[-1] == 'jpg' or img.split(".")[-1] == 'JPG':
        path_image = image_folder + '/' + str(img)
        # keep only images whose second-to-last "_"-separated name field > 1
        if int((img.split(".")[0]).split("_")[-2]) > 1:
            #if int((img.split(".")[0]).split("_")[0]):
            #if (img.split(".")[0]).split("(")[0] == 'zheng ':
            image = imread(path_image)
            image_shape = [image.shape[0], image.shape[1]]
            pos = prn.process(image, None, None, image_shape)
            #if all(pos==[None,None,None]):
            if pos is None:
                continue
            kpt = prn.get_landmarks(pos)

            # 3D vertices
            vertices = prn.get_vertices(pos)
            depth_scene_map = DepthImage.generate_depth_image(vertices,
                                                              kpt,
                                                              image.shape,
                                                              isMedFilter=True)
            #cv2.imshow('IMAGE', image[:,:,::-1])
            #cv2.imshow('DEPTH', depth_scene_map)
            #cv2.waitKey(3000)
            # NOTE(review): save_path is built but depth_scene_map is never
            # written in this chunk — the save call presumably follows in
            # the part of the file cut off here.
            save_path = save_folder + '/' + str(img)
Ejemplo n.º 13
0
def main():
    """Video PRNet demo: reads frames from 'b.mov', regresses the position
    map, and renders the reconstructed mesh alongside the input until 'q'
    is pressed; prints timing/FPS statistics on exit."""
    # OpenCV
    #cap = cv2.VideoCapture(args.video_source)
    cap = cv2.VideoCapture('b.mov')
    fps = video.FPS().start()

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    #while True:
    while cap.isOpened():
        # NOTE(review): `ret` is never checked — a failed read (e.g. end of
        # file) leaves frame as None and crashes in cv2.resize below.
        ret, frame = cap.read()

        # resize image and detect face
        frame_resize = cv2.resize(frame,
                                  None,
                                  fx=1 / DOWNSAMPLE_RATIO,
                                  fy=1 / DOWNSAMPLE_RATIO)

        # read image
        image = frame_resize
        image = resize(image)

        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]

        # the core: regress position map
        if args.isDlib:
            # cap the longest side at 1000 px to keep dlib detection fast
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            st = time()
            pos = prn.process(image)  # use dlib to detect face
            print('process', time() - st)
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            # no face: just show the downsampled frame and continue
            cv2.imshow('a', frame_resize)
            fps.update()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
            #colors = prn.get_colors(image, vertices)
            #write_obj_with_colors(os.path.join('', 'webcam' + '.obj'), save_vertices, prn.triangles, colors)
        #if args.is3d:
        #    # corresponding colors
        #    colors = prn.get_colors(image, vertices)

        # NOTE(review): `save_vertices` is only assigned when one of the
        # flags above was set; the render call below assumes it exists.


#
#    if args.isTexture:
#        if args.texture_size != 256:
#            pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range = True)
#        else:
#            pos_interpolated = pos.copy()
#        texture = cv2.remap(image, pos_interpolated[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
#        if args.isMask:
#            vertices_vis = get_visibility(vertices, prn.triangles, h, w)
#            uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
#            uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range = True)
#            texture = texture*uv_mask[:,:,np.newaxis]
#        #write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords/prn.resolution_op)#save 3d face with texture(can open with meshlab)
#    else:
#        True
#        #write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors) #save 3d face(can open with meshlab)
#
#if args.isDepth:
#    depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
#    depth = get_depth_image(vertices, prn.triangles, h, w)
#    #imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
#    #sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth':depth})
#
#if args.isKpt or args.isShow:
#    # get landmarks
#    kpt = prn.get_landmarks(pos)
#    #np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)
#
#if args.isPose or args.isShow:
#    # estimate pose
#    camera_matrix, pose = estimate_pose(vertices)

#write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)

        # render the mesh on a 900x900 grid, then flip vertically into
        # display orientation
        rendering_cc = mesh.render.render_grid(save_vertices, prn.triangles,
                                               900, 900)
        a = np.transpose(rendering_cc, axes=[1, 0, 2])
        dim = rendering_cc.shape[0]

        i_t = np.ones([dim, dim, 3], dtype=np.float32)
        for i in range(dim):
            i_t[i] = a[dim - 1 - i]
        i_t = i_t / 255
        #imsave('webcam.png', i_t)

        #kpt = prn.get_landmarks(pos)

        #cv2.imshow('frame', image)
        #cv2.imshow('a',i_t/255)

        #cv2.imshow('sparse alignment', np.concatenate([image, i_t], axis=1))
        cv2.imshow('sparse alignment', i_t)
        cv2.imshow('vedio', image)
        #cv2.imshow('sparse alignment', np.concatenate([plot_kpt(image, kpt), i_t], axis=1))
        #cv2.imshow('dense alignment', plot_vertices(image, vertices))
        #cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))

        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.release()
    cv2.destroyAllWindows()
Ejemplo n.º 14
0
def main(args):
    """Batch PRNet inference over every *.jpg / *.png in ``args.inputDir``.

    For each image with a detectable face, regresses the UV position map
    and, depending on the boolean flags of the argparse namespace ``args``,
    saves into ``args.outputDir``: a textured or vertex-colored .obj mesh
    (is3d / isTexture), depth map (isDepth), mesh .mat (isMat), 68
    landmarks (isKpt), a dlib-aligned crop (is2dKpt + is68Align), pose
    (isPose), and a live preview (isShow).  Images with no face are skipped.
    """
    if args.isShow or args.isTexture:
        # display / texture remapping helpers are only needed in these modes
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib, is_faceboxes=args.isFaceBoxes)

    # ---- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop the alpha channel of RGBA input

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                # dlib detection is slow on large images; downscale first
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        elif args.isFaceBoxes:
            pos, cropped_img = prn.process(
                image)  # use faceboxes to detect face
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # whole image as the bounding box
                pos = prn.process(image, box)
        image = image / 255.
        if pos is None:
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # flip y so the mesh matches image row order
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        if args.is3d or args.isMat:
            # per-vertex colors; the isMat dump below also needs them.
            # (Previously computed only under is3d, so running with isMat
            # alone raised NameError on `colors`.)
            colors = prn.get_colors(image, vertices)

        if args.is3d:
            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(
                        pos, (args.texture_size, args.texture_size),
                        preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                texture = cv2.remap(image,
                                    pos_interpolated[:, :, :2].astype(
                                        np.float32),
                                    None,
                                    interpolation=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h,
                                                  w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                          prn.uv_coords, h, w,
                                          prn.resolution_op)
                    uv_mask = resize(uv_mask,
                                     (args.texture_size, args.texture_size),
                                     preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(
                    os.path.join(save_folder, name + '.obj'), save_vertices,
                    prn.triangles, texture, prn.uv_coords / prn.resolution_op
                )  # save 3d face with texture (can open with meshlab)
            else:
                write_obj_with_colors(
                    os.path.join(save_folder,
                                 name + '.obj'), save_vertices, prn.triangles,
                    colors)  # save 3d face (can open with meshlab)

        if args.isDepth:
            # NOTE(review): isDepth alone (without is3d/isMat/isPose/isShow)
            # would hit an undefined `vertices` -- confirm intended flag combos
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'),
                        {'depth': depth})

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {
                'vertices': vertices,
                'colors': colors,
                'triangles': prn.triangles
            })

        if args.isKpt:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.is2dKpt and args.is68Align:
            # 68-point dlib alignment of the (possibly rescaled) input image
            ori_kpt = prn.get_landmarks_2d(pos)
            dlib_aligner = DlibAlign()
            dst_img = dlib_aligner.dlib_68_align(image, ori_kpt, 256, 0.5)
            imsave(os.path.join(save_folder, name + '.jpg'), dst_img)

        if args.isPose:
            # estimate pose; angles are scaled by ANGULAR before saving
            camera_matrix, pose, rot = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'),
                       np.array(pose) * ANGULAR)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'),
                       camera_matrix)

        if args.isShow:
            kpt = prn.get_landmarks(pos)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.waitKey(1)
Ejemplo n.º 15
0
def main(args):
    """Batch PRNet driver that writes outputs to ``args.outputDir`` and
    collects all metadata (.mat / .npy / .txt) under a ``meta`` subfolder.

    Images are discovered recursively via ``find_files``; images where no
    face is detected are skipped.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    print(image_folder)
    save_folder = args.outputDir
    print(save_folder)

    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    meta_save_folder = os.path.join(save_folder, 'meta')
    if not os.path.exists(meta_save_folder):
        os.mkdir(meta_save_folder)

    image_path_list = find_files(image_folder, ('.jpg', '.png', '.JPG'))
    total_num = len(image_path_list)
    print(image_path_list)

    for i, image_path in enumerate(image_path_list):

        name = image_path.strip().split('/')[-1][:-4]
        print(image_path)
        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop the alpha channel of RGBA input

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                # dlib is slow on large images; downscale, then back to uint8
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # fix: compare height with width; the previous
            # shape[1] == shape[2] compared width with the channel count
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0,
                                image.shape[0] - 1])  # whole-image bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # flip y so the mesh matches image row order
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        if args.isImage:
            imsave(os.path.join(save_folder, name + '.jpg'), image)

        if args.is3d or args.isMat:
            # per-vertex colors; the isMat dump below also needs them.
            # (Previously computed only under is3d -> NameError for isMat alone.)
            colors = prn.get_colors(image, vertices)

        if args.is3d:
            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(meta_save_folder, name + '_depth.mat'), {'depth': depth})

        if args.isMat:
            sio.savemat(os.path.join(meta_save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.save(os.path.join(meta_save_folder, name + '_kpt.npy'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(meta_save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(meta_save_folder, name + '_camera_matrix.txt'), camera_matrix)

        if args.isShow:
            # ---------- Plot
            # NOTE(review): this re-reads the saved jpg, which only exists
            # when isImage was also set -- confirm the expected flag combo
            image = imread(os.path.join(save_folder, name + '.jpg'))
            image_pose = plot_pose_box(image, camera_matrix, kpt)

            image = imread(os.path.join(save_folder, name + '.jpg'))
            b, g, r = cv2.split(image)
            image = cv2.merge([r, g, b])
Ejemplo n.º 16
0
def main(args):
    """Walk ``args.inputDir`` recursively, run PRNet on every file, mirror
    the directory tree under ``args.outputDir``, and optionally render the
    resulting .obj from three yaw angles into ``args.renderDir``.

    The temporary .obj is deleted after rendering.  Images where no face
    is detected are skipped.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    # `cur_dir` instead of `dir`: avoid shadowing the builtin
    for cur_dir, dirs, files in sorted(os.walk(image_folder)):
        for file in files:
            image_path = os.path.join(cur_dir, file)
            cur_dir = cur_dir.replace("\\", "/")
            new_dir = cur_dir.replace(image_folder, save_folder)
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)

            # `name` is the full destination path mirroring the input tree
            name = image_path.replace(image_folder, save_folder)
            print('data path:', name)

            # read image
            image = imread(image_path)
            [h, w, c] = image.shape
            if c > 3:
                image = image[:, :, :3]  # drop alpha channel

            # the core: regress position map
            if args.isDlib:
                max_size = max(image.shape[0], image.shape[1])
                if max_size > 1000:
                    image = rescale(image, 1000. / max_size)
                    image = (image * 255).astype(np.uint8)
                pos = prn.process(image)  # use dlib to detect face
            else:
                # always treat the whole image as the face bounding box
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1])
                pos = prn.process(image, box)

            image = image / 255.
            if pos is None:
                continue

            if args.is3d or args.isMat or args.isPose or args.isShow:
                # 3D vertices
                vertices = prn.get_vertices(pos)
                if args.isFront:
                    save_vertices = frontalize(vertices)
                else:
                    save_vertices = vertices.copy()
                # flip y so the mesh matches image row order
                save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

            if args.isImage:
                imsave(name, image)

            if args.is3d or args.isMat:
                # per-vertex colors; the isMat dump below also needs them.
                # (Previously only computed under is3d -> NameError for isMat alone.)
                colors = prn.get_colors(image, vertices)

            if args.is3d:
                if args.isTexture:
                    if args.texture_size != 256:
                        pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range=True)
                    else:
                        pos_interpolated = pos.copy()
                    texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                    if args.isMask:
                        vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                        uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                        uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range=True)
                        texture = texture * uv_mask[:, :, np.newaxis]
                    write_obj_with_texture(name.replace('.jpg', '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
                else:
                    write_obj_with_colors(name.replace('.jpg', '.obj'), save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)

                filepath = name.replace('.jpg', '.obj')
                filepath = filepath.replace("\\", "/")
                print('filepath:', filepath)
                new_dir = cur_dir.replace(args.inputDir, args.renderDir)
                if not os.path.isdir(new_dir):
                    os.mkdir(new_dir)

                # render the mesh from three camera yaw angles (0, 10, 20 deg)
                color_image1, _ = render_scene(filepath, 4.0, 0.0, 3.0)
                color_image2, _ = render_scene(filepath, 4.0, np.pi / 18.0, 3.0)
                color_image3, _ = render_scene(filepath, 4.0, np.pi / 9.0, 3.0)

                if color_image1 is None or color_image2 is None:
                    continue

                new_path = filepath.replace(args.outputDir, args.renderDir)
                save_image(new_path, '_40_', color_image1)
                save_image(new_path, '_50_', color_image2)
                save_image(new_path, '_60_', color_image3)

                # the .obj was only needed as rendering input
                os.remove(name.replace('.jpg', '.obj'))

            if args.isDepth:
                depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
                depth = get_depth_image(vertices, prn.triangles, h, w)
                imsave(os.path.join(name.replace('.jpg', '_depth.jpg')), depth_image)
                sio.savemat(name.replace('.jpg', '_depth.mat'), {'depth': depth})

            if args.isMat:
                sio.savemat(name.replace('.jpg', '_mesh.mat'),
                            {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})

            if args.isKpt or args.isShow:
                # get landmarks
                kpt = prn.get_landmarks(pos)
                np.savetxt(name.replace('.jpg', '_kpt.txt'), kpt)

            if args.isPose or args.isShow:
                # estimate pose (the original saved _pose.txt twice; once is enough)
                camera_matrix, pose = estimate_pose(vertices)
                np.savetxt(name.replace('.jpg', '_pose.txt'), pose)
                np.savetxt(name.replace('.jpg', '_camera_matrix.txt'), camera_matrix)

            if args.isShow:
                # ---------- Plot
                image_pose = plot_pose_box(image, camera_matrix, kpt)
                cv2.imshow('sparse alignment', plot_kpt(image, kpt))
                cv2.imshow('dense alignment', plot_vertices(image, vertices))
                cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
                cv2.waitKey(0)
Ejemplo n.º 17
0
def main(args):
    """Run PRNet on every *.jpg / *.png in ``args.inputDir`` and save the
    requested artifacts to ``args.outputDir``.

    This variant uses the four-argument ``write_obj(path, vertices,
    colors, triangles)`` export and does NOT flip the mesh y axis before
    saving.  Images with no detected face are skipped.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                # fix: re-quantize to uint8 after rescale, consistent with
                # the other pipeline variants (rescale returns float in [0,1])
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # fix: compare height with width; the previous
            # shape[1] == shape[2] compared width with the channel count
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0,
                                image.shape[0] - 1])  # whole-image bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices (no y flip / copy in this variant)
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices

        if args.isImage:
            imsave(os.path.join(save_folder, name + '.jpg'), image)

        if args.is3d or args.isMat:
            # per-vertex colors; the isMat dump below also needs them.
            # (Previously only computed under is3d -> NameError for isMat alone.)
            colors = prn.get_colors(image, vertices)

        if args.is3d:
            if args.isTexture:
                texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, colors, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
            else:
                write_obj(os.path.join(save_folder, name + '.obj'), save_vertices, colors, prn.triangles)  # save 3d face (can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': save_vertices, 'colors': colors, 'triangles': prn.triangles})

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)

        if args.isShow:
            # ---------- Plot
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
Ejemplo n.º 18
0
def main(args):
    """Align each source frame's PRNet mesh to a precomputed canonical
    vertex file and export the aligned mesh as .obj.

    Paths are composed from the Windows-style directory-layout arguments
    (``baseDir``, ``baseSavedir``, ``sceneDir``, ``characterDir``,
    ``sourceNum``, ``targNum``).  The i-th image is paired with the i-th
    vertex file after sorting both listings.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- compose the Windows-style source/target paths
    # i.e. d:\source
    base_dir = args.baseDir
    # i.e. d:\characters
    base_save_dir = args.baseSavedir
    # i.e. source\raupach
    scene = args.sceneDir
    # i.e. source\raupach\richardson (the target character)
    character = args.characterDir
    # i.e. source\raupach\richardson\richardson_001
    source_num = args.sourceNum
    # i.e. richardson_targ_10
    targ_num = args.targNum

    # something like D:\source\raupach\richardson\raupach_richardson_001
    image_folder = "%s\\%s\\%s\\%s_%s_%s" % (base_dir, scene, character, scene,
                                             character, source_num)
    print(image_folder)

    # something like d:\character\richardson\vertices\richards_t10
    vertices_dir = "%s\\%s\\vertices\\%s_t%s" % (base_save_dir, character,
                                                 character, targ_num)
    print(vertices_dir)

    # something like d:\character\raupach\src\align\raupach_richardson_t10_s001\obj
    save_folder = "%s\\%s\\src\\align\\%s_%s_s%s_t%s\\obj" % (
        base_save_dir, character, scene, character, source_num, targ_num)
    print(save_folder)

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    image_path_list = sorted(image_path_list)

    # repeat the listing logic for the canonical-vertices directory
    types = ('*.npy', '*.jpg')
    vert_path_list = []
    for files in types:
        vert_path_list.extend(glob(os.path.join(vertices_dir, files)))
    total_num_vert = len(vert_path_list)
    vert_path_list = sorted(vert_path_list)

    for i, image_path in enumerate(image_path_list):
        name = image_path.strip().split('\\')[-1][:-4]

        # NOTE(review): assumes vert_path_list has at least as many entries
        # as image_path_list and that sorting pairs them correctly -- confirm
        print("%s aligned with %s" % (image_path_list[i], vert_path_list[i]))

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # fix: compare height with width; the previous
            # shape[1] == shape[2] compared width with the channel count
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0,
                                image.shape[0] - 1])  # whole-image bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        vertices = prn.get_vertices(pos)
        # the i-th canonical vertex file "frontalizes" this source frame
        can_vert = vert_path_list[i]
        print(can_vert)
        save_vertices = align(vertices, can_vert)
        # flip y so the mesh matches image row order
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        colors = prn.get_colors(image, vertices)

        if args.isTexture:
            texture = cv2.remap(image,
                                pos[:, :, :2].astype(np.float32),
                                None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT,
                                borderValue=(0))
            if args.isMask:
                vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                      prn.uv_coords, h, w, prn.resolution_op)
                texture = texture * uv_mask[:, :, np.newaxis]
            write_obj_with_texture(
                os.path.join(save_folder,
                             name + '.obj'), save_vertices, colors,
                prn.triangles, texture, prn.uv_coords / prn.resolution_op
            )  # save 3d face with texture (can open with meshlab)
        else:
            write_obj(os.path.join(save_folder,
                                   name + '.obj'), save_vertices, colors,
                      prn.triangles)  # save 3d face (can open with meshlab)
Ejemplo n.º 19
0
def out_vert(args):
    """Extract PRNet vertices for every face image of a target character
    and save them as .npy files.

    Reads *.jpg / *.png from
    ``{baseDir}\\{characterDir}\\face\\{characterDir}_t{targNum}`` and
    writes one ``<name>.npy`` vertex array per image into the matching
    ``vertices`` folder.  Images with no detected face are skipped.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- compose the input/output paths
    base_dir = args.baseDir
    character = args.characterDir
    target_num = args.targNum

    # e.g. d:\characters\richardson\face\richardson_t10
    image_folder = "%s\\%s\\face\\%s_t%s" % (base_dir, character, character,
                                             target_num)
    print(image_folder)

    # e.g. d:\characters\richardson\vertices\richardson_t10
    save_folder = "%s\\%s\\vertices\\%s_t%s" % (base_dir, character, character,
                                                target_num)
    print(save_folder)

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    print(total_num)

    for i, image_path in enumerate(image_path_list):
        name = image_path.strip().split('\\')[-1][:-4]
        print(image_path)
        print(name)

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # fix: compare height with width; the previous
            # shape[1] == shape[2] compared width with the channel count
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0,
                                image.shape[0] - 1])  # whole-image bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        vertices = prn.get_vertices(pos)
        np.save("%s/%s" % (save_folder, name), vertices)
        # flipped copy computed for parity with the other pipelines;
        # only the raw vertices are persisted above
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
def get_3d_folder(
    pkl
):  # the first cell is video path, the last cell is the key frame number
    """Extract a 3D face mesh for the key frame of every video listed in *pkl*.

    Each entry of the pickled list holds the relative video path in its
    first cell and the key-frame index in its last cell.  For each video,
    the key frame is saved as a PNG, PRNet landmarks as ``*_prnet.npy`` and
    the colored mesh as ``*_original.obj``, all next to the video file.

    NOTE(review): reads a module-level ``root`` for the dataset directory --
    confirm it is defined before calling.
    """
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    # Context manager guarantees the pickle handle is closed on all paths.
    with open(pkl, "rb") as _file:
        data = pickle.load(_file)
    print(len(data))
    # data = data[int(gg * 0.1 *bbb ): int(gg * 0.1 * (bbb + 1) ) ]
    for kk, item in enumerate(data):
        print(kk)

        target_id = item[-1]
        video_path = os.path.join(root, 'unzip', item[0])
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        # Skip already-processed videos.  Fixed: this check used '.obj'
        # while the writer below emits '_original.obj', so it never matched
        # and every video was reprocessed on each run.
        if os.path.exists(video_path[:-4] + '_original.obj'):
            print('-----')
            continue
        cap = cv2.VideoCapture(video_path)
        # Decode sequentially up to the key frame (seek-free, codec-safe).
        for _ in range(target_id):
            cap.read()
        ret, target_frame = cap.read()
        cap.release()  # fixed: the capture was never released
        if not ret or target_frame is None:
            # Video is shorter than the recorded key-frame index.
            continue
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

        image = target_frame
        [h, w, c] = image.shape

        pos = prn.process(image)  # use dlib to detect face

        image = image / 255.
        if pos is None:
            continue

        # landmarks, flipped into a y-up frame
        kpt = prn.get_landmarks(pos)
        # NOTE(review): hard-coded 224 here, while the vertices below use the
        # frame height ``h`` -- only correct for 224x224 videos; confirm.
        kpt[:, 1] = 224 - kpt[:, 1]

        np.save(video_path[:-4] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        # save_vertices, p = frontalize(vertices)
        # np.save(video_path[:-4] + '_p.npy', p)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # corresponding colors
        colors = prn.get_colors(image, vertices)

        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  #save 3d face(can open with meshlab)
Ejemplo n.º 21
0
        #print('3: ',len(kpt3))
        # Merge the three keypoint groups into one flat list, then into a
        # single (N, 2) array for PRNet's cropping step.
        ptso = []
        for i in range(len(kpt1)):
            ptso.append(kpt1[i])
        for i in range(len(kpt2)):
            ptso.append(kpt2[i])
        for i in range(len(kpt3)):
            ptso.append(kpt3[i])
        pts_n = np.array(ptso)

        # cv.imshow('ori',image)
        # cv.waitKey(0)
        # Vertical flip followed by a 90-degree clockwise rotation --
        # presumably to match the orientation the keypoints were annotated
        # in; confirm against the annotation pipeline.
        img_ori = cv.flip(image,0)
        image = RotateClockWise90(img_ori)

        pos = prn.process(image,pts_n) # use dlib to detect face
        # for i in range(len(pts_n)):
        #     center = list(pts_n[i])
        #     #print(center)
        #     cv.circle(image,(int(center[0]),int(center[1])),2,(100,100,100),2)
        # cv.imshow('pic',image)
        # cv.waitKey(0)
        # cv.imshow('pic',picture)
        # cv.waitKey(0)
        # -- Basic Applications
        # get landmarks
        kpt = prn.get_landmarks(pos)

        # for i in range(len(pts_n)):
        #     center = list(pts_n[i])
        #     #print(center)
Ejemplo n.º 22
0
# Iterate every source video, sampling frames and cropping detected faces.
# (The loop body continues past this fragment; the try has its handler
# outside the visible range.)
for video_path in video_paths:
    print(video_path)
    # Skip videos that were already processed.
    if video_path in videos:
        continue
    cap = cv2.VideoCapture(video_path)
    frame_number = 0
    while cap.isOpened():
        # Only every 8th frame is processed.
        # NOTE(review): frame_number is incremented only on this skip branch
        # within the visible fragment -- confirm the processing path below
        # (outside this view) also advances it, otherwise this loops on
        # frame_number == 0 forever.
        if frame_number % 8 != 0:
            frame_number += 1
            continue
        ret, image = cap.read()
        if not ret:
            break
        image_shape = [image.shape[0], image.shape[1]]  # [height, width]
        try:
            pos, bbox = prn.process(image, None, None, image_shape)
            if bbox is None:
                continue
            # kpt = prn.get_landmarks(pos)
            # Expand the detection box by 32 px per side, clamped to bounds.
            x1 = max(bbox[0][0] - 32, 0)
            y1 = max(bbox[0][1] - 32, 0)
            x2 = min(bbox[0][2] + 32, image_shape[1])
            y2 = min(bbox[0][3] + 32, image_shape[0])

            # 3D vertices
            # vertices = prn.get_vertices(pos)
            # Millisecond timestamp keeps crop names unique across videos.
            name = str(round(time.time() * 1000)) + "-" + str(frame_number)
            face_name = name + ".bmp"
            # depth_name = name + ".jpg"
            # depth_scene_map = DepthImage.generate_depth_image(vertices, kpt, image.shape, isMedFilter=True)
            face = image[y1:y2, x1:x2]
Ejemplo n.º 23
0
# Frame-by-frame PRNet processing of an open capture stream (loop setup and
# the remainder of the body continue outside this fragment).
while True:
    # name = image_path.strip().split('/')[-1][:-4]

    # read image
    ret, image = cap.read()
    # NOTE(review): ret is not checked before image.shape -- at end of
    # stream cap.read() returns (False, None) and this raises on None;
    # confirm the stream never ends or add a guard.
    [h, w, c] = image.shape
    if c>3:
        # drop the alpha channel if present
        image = image[:,:,:3]

    # the core: regress position map
    if args.isDlib:
        # cap the longer side at 1000 px before dlib detection
        max_size = max(image.shape[0], image.shape[1])
        if max_size> 1000:
            image = rescale(image, 1000./max_size)
            image = (image*255).astype(np.uint8)
        pos = prn.process(image) # use dlib to detect face
    else:
        if image.shape[0] == image.shape[1]:
            image = resize(image, (256,256))
            pos = prn.net_forward(image/255.) # input image has been cropped to 256x256
        else:
            box = np.array([0, image.shape[1]-1, 0, image.shape[0]-1]) # cropped with bounding box
            pos = prn.process(image, box)

    image = image/255.
    if pos is None:
        continue

    if args.is3d or args.isMat or args.isPose or args.isShow:
        # 3D vertices
        vertices = prn.get_vertices(pos)
Ejemplo n.º 24
0
def main(args):
    """Run PRNet over a camera stream (``args.isCamera``) or every image in
    ``args.inputDir`` and save the requested artifacts (mesh, texture,
    depth, landmarks, pose) into ``args.outputDir``.
    """
    if args.isShow or args.isTexture or args.isCamera:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))

    if args.isCamera:

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(0)

        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret == True:

                if args.isDlib:
                    # Cap the longer side at 1000 px before dlib detection.
                    max_size = max(frame.shape[0], frame.shape[1])
                    if max_size > 1000:
                        frame = rescale(frame, 1000. / max_size)
                        frame = (frame * 255).astype(np.uint8)
                    pos = prn.process(frame)  # use dlib to detect face
                else:
                    if frame.shape[0] == frame.shape[1]:
                        frame = resize(frame, (256, 256))
                        pos = prn.net_forward(
                            frame /
                            255.)  # input frame has been cropped to 256x256
                    else:
                        box = np.array(
                            [0, frame.shape[1] - 1, 0,
                             frame.shape[0] - 1])  # cropped with bounding box
                        pos = prn.process(frame, box)
                # Normalize the frame; skip it when no face was found
                frame = frame / 255.
                if pos is None:
                    continue
                # Get landmarks in frame
                kpt = prn.get_landmarks(pos)

                # Display the resulting frame
                cv2.imshow('sparse alignment', plot_kpt(frame, kpt))

                # Press Q on keyboard to exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break

            # Break the loop
            else:
                break

        # When everything done, release the video capture object
        cap.release()

        # Closes all the frames
        cv2.destroyAllWindows()

    else:
        for i, image_path in enumerate(image_path_list):

            name = image_path.strip().split('/')[-1][:-4]

            # read image
            image = imread(image_path)
            [h, w, c] = image.shape
            if c > 3:
                # Drop the alpha channel if present.
                image = image[:, :, :3]

            # the core: regress position map
            if args.isDlib:
                max_size = max(image.shape[0], image.shape[1])
                if max_size > 1000:
                    image = rescale(image, 1000. / max_size)
                    image = (image * 255).astype(np.uint8)
                pos = prn.process(image)  # use dlib to detect face
            else:
                if image.shape[0] == image.shape[1]:
                    image = resize(image, (256, 256))
                    pos = prn.net_forward(
                        image /
                        255.)  # input image has been cropped to 256x256
                else:
                    box = np.array(
                        [0, image.shape[1] - 1, 0,
                         image.shape[0] - 1])  # cropped with bounding box
                    pos = prn.process(image, box)

            image = image / 255.
            if pos is None:
                continue

            if args.is3d or args.isMat or args.isPose or args.isShow:
                # 3D vertices
                vertices = prn.get_vertices(pos)
                if args.isFront:
                    save_vertices = frontalize(vertices)
                else:
                    save_vertices = vertices.copy()
                # Flip y so the saved mesh is in a y-up coordinate frame.
                save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

            if args.isImage:
                imsave(os.path.join(save_folder, name + '.jpg'), image)

            if args.is3d:
                # corresponding colors
                colors = prn.get_colors(image, vertices)

                if args.isTexture:
                    if args.texture_size != 256:
                        pos_interpolated = resize(
                            pos, (args.texture_size, args.texture_size),
                            preserve_range=True)
                    else:
                        pos_interpolated = pos.copy()
                    texture = cv2.remap(image,
                                        pos_interpolated[:, :, :2].astype(
                                            np.float32),
                                        None,
                                        interpolation=cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT,
                                        borderValue=(0))
                    if args.isMask:
                        vertices_vis = get_visibility(vertices, prn.triangles,
                                                      h, w)
                        uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                              prn.uv_coords, h, w,
                                              prn.resolution_op)
                        uv_mask = resize(
                            uv_mask, (args.texture_size, args.texture_size),
                            preserve_range=True)
                        texture = texture * uv_mask[:, :, np.newaxis]
                    write_obj_with_texture(
                        os.path.join(save_folder, name + '.obj'),
                        save_vertices, prn.triangles, texture,
                        prn.uv_coords / prn.resolution_op
                    )  #save 3d face with texture(can open with meshlab)
                else:
                    write_obj_with_colors(
                        os.path.join(save_folder, name + '.obj'),
                        save_vertices, prn.triangles,
                        colors)  #save 3d face(can open with meshlab)

            if args.isDepth:
                depth_image = get_depth_image(vertices, prn.triangles, h, w,
                                              True)
                depth = get_depth_image(vertices, prn.triangles, h, w)
                imsave(os.path.join(save_folder, name + '_depth.jpg'),
                       depth_image)
                sio.savemat(os.path.join(save_folder, name + '_depth.mat'),
                            {'depth': depth})

            if args.isMat:
                sio.savemat(
                    os.path.join(save_folder, name + '_mesh.mat'), {
                        'vertices': vertices,
                        'colors': colors,
                        'triangles': prn.triangles
                    })

            if args.isKpt or args.isShow:
                # get landmarks
                kpt = prn.get_landmarks(pos)
                np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

            if args.isPose or args.isShow:
                # estimate pose
                camera_matrix, pose = estimate_pose(vertices)
                # Fixed: the pose file was previously written twice per image.
                np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
                np.savetxt(
                    os.path.join(save_folder, name + '_camera_matrix.txt'),
                    camera_matrix)

            if args.isShow:
                # ---------- Plot
                image_pose = plot_pose_box(image, camera_matrix, kpt)
                cv2.imshow(
                    'sparse alignment',
                    cv2.cvtColor(np.float32(plot_kpt(image, kpt)),
                                 cv2.COLOR_RGB2BGR))
                cv2.imshow(
                    'dense alignment',
                    cv2.cvtColor(np.float32(plot_vertices(image, vertices)),
                                 cv2.COLOR_RGB2BGR))
                # Reuse image_pose instead of re-rendering the pose box.
                cv2.imshow(
                    'pose',
                    cv2.cvtColor(np.float32(image_pose), cv2.COLOR_RGB2BGR))
                cv2.waitKey(0)
Ejemplo n.º 25
0
# Build the worklist: every file in image_folder matching one of the
# supported extension patterns.
image_path_list = [
    path
    for pattern in types
    for path in glob(os.path.join(image_folder, pattern))
]
total_num = len(image_path_list)

for image_path in image_path_list:
    # read image
    image = imread(image_path)

    # the core: regress position map
    if 'AFLW2000' in image_path:
        # AFLW2000 ships ground-truth 68 3D keypoints in a sibling .mat
        # file; they are used only for detecting and cropping the face.
        annotation = sio.loadmat(image_path.replace('jpg', 'mat'))
        kpt = annotation['pt3d_68']
        pos = prn.process(image, kpt)
    else:
        pos = prn.process(image)  # use dlib to detect face

    # -- Basic Applications
    kpt = prn.get_landmarks(pos)        # sparse 68-point alignment
    vertices = prn.get_vertices(pos)    # dense 3D mesh
    colors = prn.get_colors(image, vertices)

    # -- save predicted landmarks next to the image's stem name
    name = image_path.strip().split('/')[-1][:-4]
    np.savetxt(os.path.join(save_folder, name + '.txt'), kpt)
Ejemplo n.º 26
0
# Supported image extensions.
types = ('*.jpg', '*.png')
image_path_list = []
for pattern in types:
    matches = glob(os.path.join(image_folder, pattern))
    image_path_list.extend(matches)
total_num = len(image_path_list)

for idx, image_path in enumerate(image_path_list):
    # read image
    image = imread(image_path)

    # the core: regress position map
    if 'AFLW2000' in image_path:
        # Ground-truth 68 3D keypoints live in a sibling .mat file and are
        # only used for detecting the face and cropping the image.
        mat_path = image_path.replace('jpg', 'mat')
        info = sio.loadmat(mat_path)
        kpt = info['pt3d_68']
        pos = prn.process(image, kpt)
    else:
        # use dlib to detect face
        pos = prn.process(image)

    # -- Basic Applications
    kpt = prn.get_landmarks(pos)              # get landmarks
    vertices = prn.get_vertices(pos)          # 3D vertices
    colors = prn.get_colors(image, vertices)  # corresponding colors

    # -- save: landmarks as text, colored mesh as .obj (open with meshlab)
    name = image_path.strip().split('/')[-1][:-4]
    np.savetxt(os.path.join(save_folder, name + '.txt'), kpt)
    write_obj(os.path.join(save_folder, name + '.obj'), vertices, colors, prn.triangles)
Ejemplo n.º 27
0
def main(args):
    """Run PRNet over every image in ``args.inputDir`` (or a single image
    file) and save the requested artifacts (mesh, texture, depth,
    landmarks, pose, crops) into ``args.outputDir``.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    # inputDir may also point at a single image instead of a directory.
    if os.path.isfile(image_folder):
        image_path_list.append(image_folder)
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))

    for i, image_path in enumerate(image_path_list):

        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            # Cap the longer side at 1000 px before dlib detection.
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos, crop_image = prn.process(image)  # use dlib to detect face
        else:
            # Fixed: was shape[1] == shape[2], which compared the image
            # width to the channel count, so square images never took this
            # fast path (sibling code uses shape[0] == shape[1]).
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
                crop_image = None
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos, crop_image = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # Flip y so the saved mesh is in a y-up coordinate frame.
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        if args.isImage and crop_image is not None:
            imsave(os.path.join(save_folder, name + '_crop.jpg'), crop_image)
            imsave(os.path.join(save_folder, name + '_orig.jpg'), image)

        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if args.isTexture:
                texture = cv2.remap(image,
                                    pos[:, :, :2].astype(np.float32),
                                    None,
                                    interpolation=cv2.INTER_NEAREST,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h,
                                                  w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                          prn.uv_coords, h, w,
                                          prn.resolution_op)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(
                    os.path.join(save_folder,
                                 name + '.obj'), save_vertices, colors,
                    prn.triangles, texture, prn.uv_coords / prn.resolution_op
                )  #save 3d face with texture(can open with meshlab)
            else:
                write_obj(os.path.join(save_folder,
                                       name + '.obj'), save_vertices, colors,
                          prn.triangles)  #save 3d face(can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'),
                        {'depth': depth})

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {
                'vertices': vertices,
                'colors': colors,
                'triangles': prn.triangles
            })

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            # Fixed: the pose file was previously written twice per image.
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'),
                       camera_matrix)

        if args.isShow:
            # ---------- Plot
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            # Reuse image_pose instead of re-rendering the pose box.
            cv2.imshow('pose', image_pose)
            if crop_image is not None:
                cv2.imshow('crop', crop_image)
            cv2.waitKey(0)
Ejemplo n.º 28
0
def getFacialLandmarks(isDlib, img_, numFaces=1):
    """Detect up to *numFaces* faces and their 68 facial landmarks.

    Args:
        isDlib: "True" (or True) -> dlib detector + shape predictor;
            "False" (or False) -> PRNet. Any other value returns the input
            image untouched with an empty landmark list.
        img_: input BGR image.
        numFaces: number of faces to look for in the PRNet branch.

    Returns:
        Tuple of (annotated image, list of per-face landmark arrays).
    """
    img = copy.deepcopy(img_)

    # Initialised up front so unknown isDlib values return an empty result
    # instead of raising NameError at the return statement.
    landmarkCoordAll = []

    # use dlib or PRNet for prediction of facial landmarks
    # str() accepts both the original string flags and real booleans;
    # previously a bool silently skipped both branches and crashed below.
    if str(isDlib) == "True":
        # load shape predictor model
        model_path = 'Code/dlib_model/shape_predictor_68_face_landmarks.dat'

        # load the detector and the predictor.
        # predictor accepts pre-trained model as input
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(model_path)

        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        rects = detector(img_gray, 1)

        # iterate through the points in all detected faces
        for r, rect in enumerate(rects):
            landmarks = predictor(img_gray, rect)

            # reshape landmarks to (68X2)
            landmarkCoord = np.zeros((68, 2), dtype='int')

            for i in range(68):
                landmarkCoord[i] = (landmarks.part(i).x, landmarks.part(i).y)
            landmarkCoordAll.append(landmarkCoord)

            # draw bounding box on face
            cv2.rectangle(img, (rect.left(), rect.top()), (rect.right(), rect.bottom()), (0, 255, 255), 0)

            # draw facial landmarks
            img_ = drawFacialLandmarks(img, landmarkCoord)

    if str(isDlib) == "False":
        # prn uses dlib for face detection and its own trained model for prediction of facial landmarks
        prn = PRN(is_dlib = True, prefix='Code/prnet/')

        [h, w, c] = img.shape
        if c > 3:
            img = img[:, :, :3]

        if img.shape[0] == img.shape[1]:
            img = resize(img, (256, 256))
            pos = prn.net_forward(img/255.) # input image has been cropped to 256x256
            # Fixed: posList was left undefined on this path, which crashed
            # the loop below with NameError for square inputs.
            posList = [pos]
        else:
            posList = []
            for i in range(numFaces):
                pos = prn.process(img, i)
                posList.append(pos)

        for i, pos in enumerate(posList):

            if pos is None:
                return img_, landmarkCoordAll

            # get landmark points of face
            landmarkCoord = prn.get_landmarks(pos)
            img_ = plot_kpt(img_, landmarkCoord)

            # keep only the (x, y) columns
            landmarkCoord = landmarkCoord[:, 0:2]
            landmarkCoordAll.append(landmarkCoord)

    return img_, landmarkCoordAll