Exemplo n.º 1
0
def main(args):
    """Run PRN's network on every .jpg/.png in args.inputDir and save each
    image into args.outputDir.

    NOTE(review): the regressed position map ``pos`` is computed but never
    used here; the loop only re-saves the input image.
    """
    # Select the GPU ("-1" means CPU) before the network is built.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    prn = PRN()

    # ------------- load data
    in_dir = args.inputDir
    out_dir = args.outputDir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # Gather every matching image path.
    paths = []
    for pattern in ('*.jpg', '*.png'):
        paths += glob(os.path.join(in_dir, pattern))
    total_num = len(paths)

    for idx, path in enumerate(paths):
        # File name without extension (assumes '/' path separators).
        stem = path.strip().split('/')[-1][:-4]
        # read image
        img = imread(path)
        [h, w, _] = img.shape
        # Network expects a 256x256 crop scaled to [0, 1].
        pos = prn.net_forward(img / 255.)
        imsave(os.path.join(out_dir, stem + '.jpg'), img)
Exemplo n.º 2
0
def extract_param(checkpoint_fp, root='', filelists=None, num_classes=62, device_ids=[0],
                  batch_size=1, num_workers=0):
    """Run the PRN model over a DDFA test dataset and collect its landmark
    output for every batch.

    Args:
        checkpoint_fp: checkpoint path handed to ``PRN``.
        root: dataset root directory for ``DDFATestDataset``.
        filelists: file-list spec forwarded to ``DDFATestDataset``.
        num_classes: unused in this body (kept for interface compatibility).
        device_ids: CUDA device ids; only the first is selected.
            NOTE(review): mutable default argument — harmless while never
            mutated, but a tuple would be safer.
        batch_size: DataLoader batch size.
        num_workers: DataLoader worker count.

    Returns:
        float32 np.ndarray stacking one landmark array per batch.
    """
    # Remaps saved cuda:i tensors onto cuda:0; only consumed by the
    # commented-out torch.load call below.
    map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
    # checkpoint = torch.load(checkpoint_fp, map_location=map_location)['state_dict']
    torch.cuda.set_device(device_ids[0])
    model = PRN(checkpoint_fp)
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    # model.load_state_dict(checkpoint)

    dataset = DDFATestDataset(filelists=filelists, root=root,
                              transform=transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]))
    data_loader = data.DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)

    cudnn.benchmark = True
    # model.eval()

    end = time.time()
    outputs = []
    with torch.no_grad():
        for _, inputs in enumerate(data_loader):
            inputs = inputs.cuda()

            # Get the output landmarks
            pos = model.net_forward(inputs)

            # Move to host memory, drop the batch dim, scale by 255 and
            # reorder axes to HxWxC for get_landmarks.
            out = pos.cpu().detach().numpy()
            pos = np.squeeze(out)
            cropped_pos = pos * 255
            pos = cropped_pos.transpose(1, 2, 0)

            # NOTE(review): dead check — pos is an ndarray here and can
            # never be None.
            if pos is None:
                continue

            # print(pos.shape)
            output = model.get_landmarks(pos)
            # print(output.shape)

            outputs.append(output)

        outputs = np.array(outputs, dtype=np.float32)
        print("outputs",outputs.shape)
    print(f'Extracting params take {time.time() - end: .3f}s')
    return outputs
Exemplo n.º 3
0
def prnetSwap(image, ref_image, numFaces):
    """Swap faces via PRNet position maps.

    When ``numFaces == 1`` the face from ``ref_image`` is transferred onto
    ``image``; otherwise the two faces found inside ``image`` are swapped
    with each other (two passes of ``prnetSwapOneFace``).

    Returns:
        (posList, output) on success, or (None, image) when any face
        detection fails.
    """
    prn = PRN(is_dlib=True, prefix='Code/prnet/')
    [h, w, _] = image.shape

    # BUGFIX/cleanup: the original tested ``posList is None`` (always False
    # for a list literal) and had an unreachable else after
    # ``len(posList) == 2`` (the list always has exactly two entries).
    # Check the individual detections directly instead.
    if (numFaces == 1):
        # get the landmarks — one face per image
        pos1 = prn.process(image, 0)
        pos2 = prn.process(ref_image, 0)
        if (pos1 is None) or (pos2 is None):
            return None, image
        posList = [pos1, pos2]
        output = prnetOne(prn, image, ref_image, posList[0], posList[1], h,
                          w)
        return posList, output

    else:
        # get the landmarks — two faces within the same image
        pos1 = prn.process(image, 0)
        pos2 = prn.process(image, 1)
        if (pos1 is None) or (pos2 is None):
            return None, image
        posList = [pos1, pos2]
        # Swap face 0 onto face 1, then face 1 onto face 0.
        output = prnetSwapOneFace(prn, image, ref_image, posList[0],
                                  posList[1], h, w)
        output = prnetSwapOneFace(prn, output, ref_image, posList[1],
                                  posList[0], h, w)
        return posList, output
def get_3d_pkl_lrw(
    pkl,
    root,
    bbb=0
):  # the first cell is video path the last cell is the key frame nnuumber
    """Reconstruct a colored 3D face mesh for the key frame of every LRW
    item listed in a pickle file.

    Args:
        pkl: path to a pickled list; each item's first cell is a frame-path
            prefix and its last cell is the key-frame index.
        root: unused in this body — TODO confirm whether callers rely on it.
        bbb: shard index for slicing the list.
            NOTE(review): the slice uses ``gg * 1 * bbb`` … ``gg * 1 * (bbb+1)``,
            so any bbb > 0 selects an empty slice — presumably a leftover
            from a multi-shard variant; verify before sharding.

    Side effects: writes ``<prefix>_prnet.npy`` (2D landmarks) and
    ``<prefix>_original.obj`` (colored mesh) next to each item's frames.
    """
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    _file = open(pkl, "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)

    data = data[int(gg * 1 * (bbb)):int(gg * 1 * (bbb + 1))]
    for kk, item in enumerate(data):
        print(kk)
        print(item)
        # Skip items whose mesh was already generated.
        if os.path.exists(item[0] + '_original.obj'):
            continue
        target_id = item[-1]

        img_path = item[0] + '_%05d.png' % target_id
        print(img_path)
        target_frame = cv2.imread(img_path)
        # OpenCV loads BGR; PRNet expects RGB.
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

        image = target_frame
        # read image
        [h, w, c] = image.shape

        pos = prn.process(image)  # use dlib to detect face

        image = image / 255.
        if pos is None:
            # Face detection failed; skip this item.
            print('+++++')
            continue

        # landmark (y flipped into mesh coordinates)
        kpt = prn.get_landmarks(pos)
        kpt[:, 1] = h - kpt[:, 1]

        np.save(item[0] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)

        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # corresponding colors
        colors = prn.get_colors(image, vertices)

        # print (colors.shape)
        # print ('=========')
        # cv2.imwrite('./mask.png', colors * 255)
        write_obj_with_colors(item[0] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  #save 3d face(can open with meshlab)
Exemplo n.º 5
0
def main(args):
    """Frame-by-frame face exchange between a source and a reference video.

    Reads ``args.src`` and ``args.ref`` in lockstep, calls
    ``face_exchanging`` on each frame pair, and writes the merged frames to
    ``args.output``. Frames where detection fails (s == 0) fall back to the
    raw source frame and are dumped to ./videos/detect_fail/ for debugging.
    """
    # deploy GPU environment
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"

    prn = PRN(is_dlib=True)
    print("source video: ", args.src, " reference: ", args.ref)

    # sampling from source video (first frame fixes the output size)
    cap_s = cv2.VideoCapture(args.src)
    ret_s, frame_s = cap_s.read()
    if ret_s:
        [h, w, _] = frame_s.shape

    # sampling from reference video
    cap_r = cv2.VideoCapture(args.ref)
    ret_r, frame_r = cap_r.read()

    videoWriter = cv2.VideoWriter(args.output, FOURCC, OUTPUT_FPS, (w, h))

    frame_no = 0
    prev_valid_src = frame_s
    prev_valid_ref = frame_r
    while (ret_s and ret_r):
        frame_no += 1

        ret_s, frame_s = cap_s.read()
        ret_r, frame_r = cap_r.read()
        # BUGFIX: either stream may end right here; the old code passed the
        # resulting None frames into face_exchanging and crashed. Stop
        # cleanly instead.
        if not (ret_s and ret_r):
            break

        print(frame_no, "processing")

        # main body
        merged_frame, prev_valid_src, prev_valid_ref, s = face_exchanging(
            prn, frame_s, frame_r, h, w, prev_valid_src, prev_valid_ref,
            frame_no)

        if s == 0:
            # Detection failed: keep the raw source frame and archive it.
            print("skip one frame.")
            videoWriter.write(frame_s)
            cv2.imwrite("./videos/detect_fail/" + str(frame_no) + ".jpg",
                        frame_s)
            continue

        videoWriter.write(merged_frame)

    videoWriter.release()
Exemplo n.º 6
0
def main(args):
    """Pre-process every .jpg/.png in args.inputDir with PRNet cropping and
    save the cropped image plus its similarity transform to args.outputDir.

    NOTE(review): ``image.shape[1] == image.shape[2]`` compares width to the
    channel count (usually 3) — the square-image test was probably meant to
    be ``shape[0] == shape[1]``; confirm against the sibling example.
    NOTE(review): ``resize(image, (256, 256))`` returns a single array, so
    the 3-way unpack on that branch would raise if it were ever reached.
    NOTE(review): 'tform.txt' is rewritten every iteration, so only the last
    image's transform survives — verify that is intended.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):
        # File name without extension (assumes '/' separators).
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape
        if args.isDlib:
            # Downscale very large images before detection, then restore
            # uint8 range (rescale returns floats in [0, 1]).
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            cropped_image, tform, resolution_np = prn.process(
                image)  # use dlib to detect face
        else:  #without dlib should set the parameters of imageinfo
            if image.shape[1] == image.shape[2]:
                cropped_image, tform, resolution_np = resize(image, (256, 256))
                #pos = prn.net_forward(image/255.)             # input image has been cropped to 256x256,and set to be 0-1 values
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                cropped_image, tform, resolution_np = prn.process(image, box)
        imsave(os.path.join(save_folder, name + '_pre_processed.jpg'), image)
        np.savetxt(os.path.join(save_folder, 'tform.txt'), tform)
def get_3d_single(video_path=None, target_id=None, img_path=None):
    """Reconstruct a colored 3D face mesh for a single frame.

    The frame comes either from ``video_path`` (frame index ``target_id``,
    which is also saved as a PNG) or from a still ``img_path``.

    Side effects: writes ``<prefix>_prnet.npy`` (2D landmarks) and
    ``<prefix>_original.obj`` (mesh, viewable in meshlab) next to the
    source file, where <prefix> is the path without its 4-char extension.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    if video_path is not None:
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
        cap = cv2.VideoCapture(video_path)
        # Advance to the requested frame, then grab it.
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
    elif img_path is not None:
        target_frame = cv2.imread(img_path)
    # OpenCV loads BGR; PRNet expects RGB.
    target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

    image = target_frame
    # read image
    [h, w, c] = image.shape

    pos = prn.process(image)  # use dlib to detect face

    image = image / 255.
    # BUGFIX: sibling helpers guard against detection failure; without this
    # the get_landmarks call below crashes on pos=None.
    if pos is None:
        print('+++++')
        return

    # Output path prefix (strip the 4-char extension).
    prefix = video_path[:-4] if video_path is not None else img_path[:-4]

    # landmark (y flipped into mesh coordinates)
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]
    np.save(prefix + '_prnet.npy', kpt)

    # 3D vertices
    vertices = prn.get_vertices(pos)
    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

    # corresponding colors
    colors = prn.get_colors(image, vertices)

    write_obj_with_colors(prefix + '_original.obj', save_vertices,
                          prn.triangles,
                          colors)  #save 3d face(can open with meshlab)
    print('The generated 3d mesh model is stored in ' + prefix +
          '_original.obj')
def get_3d_single_video(
        img_path):  # you need the image path of the most visible frame.
    """Reconstruct a colored 3D face mesh from the most-visible frame of a
    video.

    Side effects: writes ``<prefix>__prnet.npy`` (2D landmarks) and
    ``<prefix>__original.obj`` (mesh), where <prefix> strips the last 11
    characters of ``img_path`` (frame suffix plus extension).
    """
    # ---- init PRN
    prn = PRN(is_dlib=True)

    print(img_path)
    target_frame = cv2.imread(img_path)
    # OpenCV loads BGR; PRNet expects RGB.
    target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

    image = target_frame
    # read image
    [h, w, c] = image.shape

    pos = prn.process(image)  # use dlib to detect face

    image = image / 255.
    if pos is None:
        print('No pos')
        # BUGFIX: the original fell through after the message and crashed in
        # get_landmarks(None); bail out instead.
        return

    # landmark (y flipped into mesh coordinates)
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]

    np.save(img_path[:-11] + '__prnet.npy', kpt)
    # 3D vertices
    vertices = prn.get_vertices(pos)

    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

    # corresponding colors
    colors = prn.get_colors(image, vertices)

    write_obj_with_colors(img_path[:-11] + '__original.obj', save_vertices,
                          prn.triangles,
                          colors)  #save 3d face(can open with meshlab)
Exemplo n.º 9
0
def main():
    """Preview loop: read frames from 'b.mov', downsample, display, and
    report FPS until the stream ends or 'q' is pressed.

    NOTE(review): relies on module-level ``args``, ``out`` (a VideoWriter),
    ``DOWNSAMPLE_RATIO``, ``video`` and ``resize`` — confirm they exist at
    call time. ``prn`` is built but unused in this body.
    """
    # OpenCV
    #cap = cv2.VideoCapture(args.video_source)
    cap = cv2.VideoCapture('b.mov')
    fps = video.FPS().start()

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    while cap.isOpened():
        ret, frame = cap.read()
        # BUGFIX: cap.read() returns (False, None) at end of stream; the
        # old code passed None straight into cv2.resize and crashed.
        if not ret:
            break

        # resize image and detect face
        frame_resize = cv2.resize(frame,
                                  None,
                                  fx=1 / DOWNSAMPLE_RATIO,
                                  fy=1 / DOWNSAMPLE_RATIO)
        print(frame_resize.shape)
        out.write(frame_resize)
        # read image
        image = frame_resize
        image = resize(image)

        cv2.imshow('a', frame_resize)
        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.release()
    cv2.destroyAllWindows()
# Module-level script: scan a folder of face photos and prepare them for
# depth-image generation with PRNet.
# NOTE(review): the loop body appears truncated in this fragment — it ends
# right after loading the image.
import numpy as np
import scipy.io as sio
import os
import shutil
from skimage.io import imread, imsave
import cv2
import os  # NOTE(review): duplicate import of os
from glob import glob

from api import PRN
import utils.depth_image as DepthImage

os.environ['CUDA_VISIBLE_DEVICES'] = '3'

prn = PRN(is_dlib=True, is_opencv=False)

# ------------- load data
image_folder = '/home/chang/dataset/A_face1'
save_folder = '/home/chang/dataset/A_depth'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

first_dir = os.listdir(image_folder)
for img in first_dir:
    # absolute path of the second-level directory
    if img.split(".")[-1] == 'jpg' or img.split(".")[-1] == 'JPG':
        path_image = image_folder + '/' + str(img)
        # Only process files whose second-to-last '_' field parses > 1 —
        # presumably a pose/index filter; verify against the dataset naming.
        if int((img.split(".")[0]).split("_")[-2]) > 1:
            #if int((img.split(".")[0]).split("_")[0]):
            #if (img.split(".")[0]).split("(")[0] == 'zheng ':
            image = imread(path_image)
Exemplo n.º 11
0
def out_vert(args):
    """Regress PRNet vertices for every image in a character's face folder
    and save each vertex array as a .npy file.

    Reads  <baseDir>\\<character>\\face\\<character>_t<targNum>\\*.jpg|png
    Writes <baseDir>\\<character>\\vertices\\<character>_t<targNum>\\<name>.npy
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    base_dir = args.baseDir
    character = args.characterDir
    target_num = args.targNum

    #e.g. d:\characters\richardson\face\richardson_t10
    image_folder = "%s\\%s\\face\\%s_t%s" % (base_dir, character, character,
                                             target_num)
    print(image_folder)

    #e.g. d:\characters\richardson\vertices\richardson_t10
    save_folder = "%s\\%s\\vertices\\%s_t%s" % (base_dir, character, character,
                                                target_num)
    print(save_folder)

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    print(total_num)

    for i, image_path in enumerate(image_path_list):
        # File name without extension (Windows '\\' separators).
        name = image_path.strip().split('\\')[-1][:-4]
        print(image_path)
        print(name)

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            # Downscale very large images before detection, then restore
            # uint8 range (rescale returns floats in [0, 1]).
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            # BUGFIX: the square-image test compared width (shape[1]) to the
            # channel count (shape[2], normally 3); compare height to width,
            # matching the corrected sibling example.
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        vertices = prn.get_vertices(pos)
        np.save("%s/%s" % (save_folder, name), vertices)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
Exemplo n.º 12
0
def main(args):
    """Align each source-frame mesh to a canonical target mesh and write the
    result as per-frame .obj files (optionally textured).

    Pairs the i-th image in the source folder with the i-th .npy vertex file
    in the target's vertices folder (both sorted), regresses PRNet vertices
    for the image, "frontalizes" them via ``align`` against the paired
    vertex file, and writes the mesh.

    NOTE(review): assumes at least as many vertex files as images —
    otherwise ``vert_path_list[i]`` raises IndexError; confirm.
    NOTE(review): ``image.shape[1] == image.shape[2]`` compares width to the
    channel count; probably intended ``shape[0] == shape[1]``.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    # image_folder = args.inputDir
    # save_folder = args.outputDir
    # vertices_dir = args.vertDir

    #i.e. d:\source
    base_dir = args.baseDir

    #i.e. d:\characters
    base_save_dir = args.baseSavedir

    #i.e. source\raupach
    scene = args.sceneDir

    #i.e source\raupach\richardson (the target character)
    character = args.characterDir

    #i.e. source\rauapch\richardson\richardson_001
    source_num = args.sourceNum

    #    targ_character = args.targChar
    #i.e. richardson_targ_10
    targ_num = args.targNum

    # something like D:\source\raupach\richardson\raupach_richardson_001
    image_folder = "%s\\%s\\%s\\%s_%s_%s" % (base_dir, scene, character, scene,
                                             character, source_num)
    print(image_folder)

    #something like d:\character\richardson\vertices\richards_t10
    vertices_dir = "%s\\%s\\vertices\\%s_t%s" % (base_save_dir, character,
                                                 character, targ_num)
    print(vertices_dir)

    #something like d:\character\raupach\src\align\raupach_richardson_t10_s001\\obj
    save_folder = "%s\\%s\\src\\align\\%s_%s_s%s_t%s\\obj" % (
        base_save_dir, character, scene, character, source_num, targ_num)
    print(save_folder)

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    # image_path_list= []
    # for root, dirs, files in os.walk('%s' % image_folder):
    #     for file in files:
    #         if file.endswith('.jpg'):
    #             image_path_list.append(file)
    # print (image_path_list)

    # Collect source images, sorted so the pairing with vertex files below
    # is deterministic.
    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    image_path_list = sorted(image_path_list)
    #print (image_path_list)

    # #repeating the above logic for a vertices directory.
    types = ('*.npy', '*.jpg')
    vert_path_list = []
    for files in types:
        vert_path_list.extend(glob(os.path.join(vertices_dir, files)))
    total_num_vert = len(vert_path_list)
    # vert_path_list.reverse()
    vert_path_list = sorted(vert_path_list)
    #print (vert_path_list)

    for i, image_path in enumerate(image_path_list):
        # File name without extension (Windows '\\' separators).
        name = image_path.strip().split('\\')[-1][:-4]

        print("%s aligned with %s" % (image_path_list[i], vert_path_list[i]))

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            # Downscale oversized images, then restore uint8 range
            # (rescale returns floats in [0, 1]).
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            if image.shape[1] == image.shape[2]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue

        vertices = prn.get_vertices(pos)
        #takes the nth file in the directory of the vertices to "frontalize" the source image.
        can_vert = vert_path_list[i]
        print(can_vert)
        save_vertices = align(vertices, can_vert)
        # Flip y into mesh coordinates.
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        colors = prn.get_colors(image, vertices)

        if args.isTexture:
            # Sample the source image along the position map to get a UV
            # texture; optionally mask out self-occluded regions.
            texture = cv2.remap(image,
                                pos[:, :, :2].astype(np.float32),
                                None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT,
                                borderValue=(0))
            if args.isMask:
                vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                      prn.uv_coords, h, w, prn.resolution_op)
                texture = texture * uv_mask[:, :, np.newaxis]
            write_obj_with_texture(
                os.path.join(save_folder,
                             name + '.obj'), save_vertices, colors,
                prn.triangles, texture, prn.uv_coords / prn.resolution_op
            )  #save 3d face with texture(can open with meshlab)
        else:
            write_obj(os.path.join(save_folder,
                                   name + '.obj'), save_vertices, colors,
                      prn.triangles)  #save 3d face(can open with meshlab)
Exemplo n.º 13
0
def get_3d(bbb):
    """Reconstruct a colored 3D face mesh for the key frame of each item in
    shard ``bbb`` (10% slices) of front_rt.pkl.

    Args:
        bbb: shard index 0-9; selects items [gg*0.1*bbb, gg*0.1*(bbb+1)).

    Relies on a module-level ``root`` for the dataset location. Per item,
    writes the extracted frame PNG, ``*_prnet.npy`` landmarks and an
    ``*_original.obj`` mesh next to the source video.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    # ------------- load data
    # frame_id = "test_video/id00419/3U0abyjM2Po/00024"
    # mesh_file = os.path.join(root, frame_id + ".obj")
    # rt_file = os.path.join(root, frame_id + "_sRT.npy")
    # image_path

    # _file = open(os.path.join(root, 'txt',  "front_rt.pkl"), "rb")
    # data = pickle._Unpickler(_file)
    # data.encoding = 'latin1'
    # data = data.load()
    _file = open(os.path.join(root, 'txt', "front_rt.pkl"), "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)
    print(len(data))
    data = data[int(gg * 0.1 * bbb):int(gg * 0.1 * (bbb + 1))]
    for kk, item in enumerate(data):
        print(kk)

        target_id = item[-1]
        video_path = os.path.join(root, 'unzip', item[0] + '.mp4')
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        # Skip items whose mesh was already generated.
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
            continue
        cap = cv2.VideoCapture(video_path)
        # Advance to the key frame, then grab it.
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        # OpenCV loads BGR; PRNet expects RGB.
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

        image = target_frame
        # read image
        [h, w, c] = image.shape

        pos = prn.process(image)  # use dlib to detect face

        image = image / 255.
        if pos is None:
            continue

        # landmark
        # NOTE(review): siblings use ``h - kpt[:, 1]`` here; the hard-coded
        # 224 is only correct for 224-pixel-tall frames — confirm intent.
        kpt = prn.get_landmarks(pos)
        kpt[:, 1] = 224 - kpt[:, 1]

        np.save(video_path[:-4] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        # save_vertices, p = frontalize(vertices)
        # np.save(video_path[:-4] + '_p.npy', p)
        # if os.path.exists(video_path[:-4] + '.obj'):
        #     continue
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # corresponding colors
        colors = prn.get_colors(image, vertices)

        # print (colors.shape)
        # print ('=========')
        # cv2.imwrite('./mask.png', colors * 255)
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  # save 3d face(can open with meshlab)
Exemplo n.º 14
0
# Module-level script: enumerate the test images for PRNet processing.
# NOTE(review): the loop body appears truncated in this fragment — it only
# prints each path.
import numpy as np
import os
from glob import glob
import scipy.io as sio
from skimage.io import imread, imsave
from time import time

from api import PRN
from utils.write import write_obj_with_colors

# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
prn = PRN(is_dlib=False)

# ------------- load data
image_folder = 'TestImages/tom_input/'
save_folder = 'TestImages/tom_output'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

types = ('*.jpg', '*.png')
image_path_list = []
for files in types:
    image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)

# print(total_num)

for i, image_path in enumerate(image_path_list):
    print(image_path)
Exemplo n.º 15
0
def main(_):
    """PRNet demo driver: either batch-process the images in FLAGS.img_path
    or run a live webcam loop, displaying sparse/dense alignment and pose
    windows; 's' saves results, 'q' quits.
    """
    # init
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    set_memory_growth()

    # load PRNet model
    cfg = load_yaml(FLAGS.cfg_path)
    model = PRN(cfg, is_dlib=True)

    # evaluation
    if not FLAGS.use_cam:  # on test-img
        print(
            "[*] Processing on images in {}. Press 's' to save result.".format(
                FLAGS.img_path))
        img_paths = glob.glob(os.path.join(FLAGS.img_path, '*'))
        for img_path in img_paths:
            img = cv2.imread(img_path)
            # NOTE(review): process() is given the path here but the image
            # array in the webcam branch — confirm it accepts both.
            pos = model.process(img_path)
            if pos is None:
                continue

            vertices = model.get_vertices(pos)
            kpt = model.get_landmarks(pos)
            camera_matrix, _ = estimate_pose(vertices)

            # Four side-by-side windows: input, landmarks, vertices, pose.
            cv2.imshow('Input', img)
            cv2.imshow('Sparse alignment', plot_kpt(img, kpt))
            cv2.imshow('Dense alignment', plot_vertices(img, vertices))
            cv2.imshow('Pose', plot_pose_box(img, camera_matrix, kpt))
            cv2.moveWindow('Input', 0, 0)
            cv2.moveWindow('Sparse alignment', 500, 0)
            cv2.moveWindow('Dense alignment', 1000, 0)
            cv2.moveWindow('Pose', 1500, 0)

            key = cv2.waitKey(0)
            if key == ord('q'):
                exit()
            elif key == ord('s'):
                cv2.imwrite(
                    os.path.join(FLAGS.save_path, os.path.basename(img_path)),
                    plot_kpt(img, kpt))
                print("Result saved in {}".format(FLAGS.save_path))

    else:  # webcam demo
        cap = cv2.VideoCapture(0)
        start_time = time.time()
        count = 1
        while (True):
            _, image = cap.read()

            pos = model.process(image)
            # Overlay instantaneous FPS on the raw frame.
            fps_str = 'FPS: %.2f' % (1 / (time.time() - start_time))
            start_time = time.time()
            cv2.putText(image, fps_str, (25, 25), cv2.FONT_HERSHEY_DUPLEX,
                        0.75, (0, 255, 0), 2)
            cv2.imshow('Input', image)
            cv2.moveWindow('Input', 0, 0)

            key = cv2.waitKey(1)
            if pos is None:
                # No face: tear down the result windows but keep running.
                cv2.waitKey(1)
                cv2.destroyWindow('Sparse alignment')
                cv2.destroyWindow('Dense alignment')
                cv2.destroyWindow('Pose')
                if key & 0xFF == ord('q'):
                    break
                continue

            else:
                vertices = model.get_vertices(pos)
                kpt = model.get_landmarks(pos)
                camera_matrix, _ = estimate_pose(vertices)

                result_list = [
                    plot_kpt(image, kpt),
                    plot_vertices(image, vertices),
                    plot_pose_box(image, camera_matrix, kpt)
                ]

                cv2.imshow('Sparse alignment', result_list[0])
                cv2.imshow('Dense alignment', result_list[1])
                cv2.imshow('Pose', result_list[2])
                cv2.moveWindow('Sparse alignment', 500, 0)
                cv2.moveWindow('Dense alignment', 1000, 0)
                cv2.moveWindow('Pose', 1500, 0)

                if key & 0xFF == ord('s'):
                    # Save the three result panels side by side plus the
                    # raw frame, numbered sequentially.
                    image_name = 'prnet_cam_' + str(count)
                    save_path = FLAGS.save_path

                    cv2.imwrite(
                        os.path.join(save_path, image_name + '_result.jpg'),
                        np.concatenate(result_list, axis=1))
                    cv2.imwrite(
                        os.path.join(save_path, image_name + '_image.jpg'),
                        image)
                    count += 1
                    print("Result saved in {}".format(FLAGS.save_path))

                if key & 0xFF == ord('q'):
                    break
Exemplo n.º 16
0
def main(args):
    """Full PRNet pipeline: for every .jpg/.png in args.inputDir, regress a
    position map and emit whichever artifacts the flags request (3D .obj,
    depth map, .mat mesh, landmark txt, pose txt, on-screen plots) into
    args.outputDir.

    NOTE(review): unlike sibling examples, the rescaled image is not cast
    back to uint8 (rescale returns floats in [0, 1]), yet it is divided by
    255 again below — confirm prn.process tolerates both ranges.
    NOTE(review): ``image.shape[1] == image.shape[2]`` compares width to the
    channel count; probably intended ``shape[0] == shape[1]``.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu # GPU number, -1 for CPU
    prn = PRN(is_dlib = args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list= []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        # File name without extension (assumes '/' separators).
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size> 1000:
                image = rescale(image, 1000./max_size)
            pos = prn.process(image) # use dlib to detect face
        else:
            if image.shape[1] == image.shape[2]:
                image = resize(image, (256,256))
                pos = prn.net_forward(image/255.) # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1]-1, 0, image.shape[0]-1]) # cropped with bounding box
                pos = prn.process(image, box)

        image = image/255.
        if pos is None:
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices

        if args.isImage:
            imsave(os.path.join(save_folder, name + '.jpg'), image)

        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if args.isTexture:
                # Sample the image along the position map to build a UV
                # texture; optionally mask self-occluded regions.
                texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    texture = texture*uv_mask[:,:,np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, colors, prn.triangles, texture, prn.uv_coords/prn.resolution_op)#save 3d face with texture(can open with meshlab)
            else:
                write_obj(os.path.join(save_folder, name + '.obj'), save_vertices, colors, prn.triangles) #save 3d face(can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': save_vertices, 'colors': colors, 'triangles': prn.triangles})

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)

        if args.isShow:
            # ---------- Plot
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
Exemplo n.º 17
0
    """ Corresponding colors """
    colors = prn.get_colors(frame, vertices)
    print(colors)
    print(colors.shape)

    write_obj_with_colors(out_obj, vertices, prn.triangles, colors)
    # np.savetxt(os.path.join(save_folder, name + "_" + str(idx) + '.txt'), kpt) 
    
    print("Outputted {}".format(idx))
    # sio.savemat(out_mat, {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})



  
# Module-level script: load a .npy stack of frames and convert each one to
# a 3D .obj mesh via frames_to_objs.
# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
prn = PRN(is_dlib = True)

parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
parser.add_argument('--frames_path', type=str, help='NPath to .npy file', required=True)
parser.add_argument('--out_folder', type=str, help='Folder for output', required=True)
# BUGFIX: this add_argument call was missing its closing parenthesis,
# making the whole script a SyntaxError.
parser.add_argument('--out_name', type=str, help='Name for output', required=False)
args = parser.parse_args()

print(args.frames_path)
print(args.out_folder)
print(args.out_name)

frames = np.load(args.frames_path)
print(frames.shape)
frames_to_objs(frames, args.out_folder, name=args.out_name)
Exemplo n.º 18
0
def main(args):
    """Batch-run PRNet over every .jpg/.png image in args.inputDir.

    Driven entirely by the boolean flags on ``args``: per image this can
    write a coloured/textured .obj mesh, a depth image + .mat, a mesh .mat,
    68 3D landmarks, a 2D-landmark-aligned crop, pose/camera text files,
    and/or show an on-screen visualisation.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib, is_faceboxes=args.isFaceBoxes)

    # ---- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        # file name without directory or extension
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3: image = image[:, :, :3]  # drop the alpha channel of RGBA images

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                # shrink very large images before detection
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        elif args.isFaceBoxes:
            pos, cropped_img = prn.process(
                image)  # use faceboxes to detect face
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos = prn.process(image, box)
        image = image / 255.
        if pos is None: continue  # no face found: skip this image

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # flip y so the saved mesh is in image coordinates
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # 3D face rotation/alignment method (kept for reference, disabled)
        # if args.isImage:
        #     vertices = prn.get_vertices(pos)
        #     scale_init = 180 / (np.max(vertices[:, 1]) - np.min(vertices[:, 1]))
        #     colors = prn.get_colors(image, vertices)
        #     triangles = prn.triangles
        #     camera_matrix, pose = estimate_pose(vertices)
        #     yaw, pitch, roll = pos * ANGULAR
        #     vertices1 = vertices - np.mean(vertices, 0)[np.newaxis, :]
        #
        #     obj = {'s': scale_init, 'angles': [-pitch, yaw, -roll + 180], 't': [0, 0, 0]}
        #     camera = {'eye':[0, 0, 256], 'proj_type':'perspective', 'at':[0, 0, 0],
        #               'near': 1000, 'far':-100, 'fovy':30, 'up':[0,1,0]}
        #
        #     image1 = transform_test(vertices1, obj, camera, triangles, colors, h=256, w=256) * 255
        #     image1 = image1.astype(np.uint8)
        #     imsave(os.path.join(save_folder, name + '.jpg'), image1)

        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(
                        pos, (args.texture_size, args.texture_size),
                        preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                # sample the texture from the image via the position map
                texture = cv2.remap(image,
                                    pos_interpolated[:, :, :2].astype(
                                        np.float32),
                                    None,
                                    interpolation=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=(0))
                if args.isMask:
                    # zero out self-occluded (invisible) texture pixels
                    vertices_vis = get_visibility(vertices, prn.triangles, h,
                                                  w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                          prn.uv_coords, h, w,
                                          prn.resolution_op)
                    uv_mask = resize(uv_mask,
                                     (args.texture_size, args.texture_size),
                                     preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(
                    os.path.join(save_folder, name + '.obj'), save_vertices,
                    prn.triangles, texture, prn.uv_coords / prn.resolution_op
                )  #save 3d face with texture(can open with meshlab)
            else:
                write_obj_with_colors(
                    os.path.join(save_folder,
                                 name + '.obj'), save_vertices, prn.triangles,
                    colors)  #save 3d face(can open with meshlab)

        if args.isDepth:
            # NOTE(review): `vertices` only exists when is3d/isMat/isPose/isShow
            # is also set — isDepth alone would raise NameError. Confirm flags.
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'),
                        {'depth': depth})

        if args.isMat:
            # NOTE(review): `colors` only exists when is3d is set — confirm.
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {
                'vertices': vertices,
                'colors': colors,
                'triangles': prn.triangles
            })

        if args.isKpt:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.is2dKpt and args.is68Align:
            # align the image to the 68 2D landmarks and save the crop
            ori_kpt = prn.get_landmarks_2d(pos)
            dlib_aligner = DlibAlign()
            dst_img = dlib_aligner.dlib_68_align(image, ori_kpt, 256, 0.5)
            imsave(os.path.join(save_folder, name + '.jpg'), dst_img)

        if args.isPose:
            # estimate pose
            camera_matrix, pose, rot = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'),
                       np.array(pose) * ANGULAR)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'),
                       camera_matrix)

        if args.isShow:
            kpt = prn.get_landmarks(pos)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            # cv2.imshow('dense alignment', plot_vertices(image, vertices))
            # cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(1)
def get_3d_folder(pkl):
    """Reconstruct a 3D face mesh for the key frame of every video in *pkl*.

    Each pickle entry holds the video path in its first cell and the key
    frame number in its last cell.  For each entry the key frame is grabbed,
    PRNet is run on it, and the landmarks (.npy) plus a coloured mesh (.obj)
    are written next to the video file.
    """
    # PRN with dlib face detection
    prn = PRN(is_dlib=True)

    with open(pkl, "rb") as fh:
        entries = pickle.load(fh)
    print(len(entries))

    for idx, entry in enumerate(entries):
        print(idx)

        key_frame = entry[-1]
        video_path = os.path.join(root, 'unzip', entry[0])

        # skip missing videos and already-processed ones
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
            continue

        # advance the stream to the key frame, then grab it
        cap = cv2.VideoCapture(video_path)
        for _ in range(key_frame):
            ret, _frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % key_frame, target_frame)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)

        image = target_frame
        [h, w, c] = image.shape

        pos = prn.process(image)  # dlib detection + position-map regression
        image = image / 255.
        if pos is None:
            continue

        # landmarks, y-flipped into image coordinates (224-high canvas)
        kpt = prn.get_landmarks(pos)
        kpt[:, 1] = 224 - kpt[:, 1]
        np.save(video_path[:-4] + '_prnet.npy', kpt)

        # 3D vertices, y-flipped for the saved mesh
        vertices = prn.get_vertices(pos)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        # per-vertex colours sampled from the normalised frame
        colors = prn.get_colors(image, vertices)
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices,
                              prn.triangles,
                              colors)  #save 3d face(can open with meshlab)
Exemplo n.º 20
0
def main():
    """Video/webcam demo: run PRNet on each frame and render the
    reconstructed mesh alongside the input until 'q' is pressed."""

    # OpenCV
    #cap = cv2.VideoCapture(args.video_source)
    cap = cv2.VideoCapture('b.mov')
    fps = video.FPS().start()

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    #while True:
    while cap.isOpened():
        # NOTE(review): ret is never checked — a failed read leaves frame=None
        # and the resize below would raise. Confirm acceptable for a demo.
        ret, frame = cap.read()

        # resize image and detect face
        frame_resize = cv2.resize(frame,
                                  None,
                                  fx=1 / DOWNSAMPLE_RATIO,
                                  fy=1 / DOWNSAMPLE_RATIO)

        # read image
        image = frame_resize
        # NOTE(review): skimage.transform.resize requires an output shape;
        # a one-argument call suggests `resize` is some other helper here —
        # confirm which `resize` is in scope.
        image = resize(image)

        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop alpha channel

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            st = time()
            pos = prn.process(image)  # use dlib to detect face
            print('process', time() - st)
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            # no face this frame: just show the raw frame and continue
            cv2.imshow('a', frame_resize)
            fps.update()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
            #colors = prn.get_colors(image, vertices)
            #write_obj_with_colors(os.path.join('', 'webcam' + '.obj'), save_vertices, prn.triangles, colors)
        #if args.is3d:
        #    # corresponding colors
        #    colors = prn.get_colors(image, vertices)


#
#    if args.isTexture:
#        if args.texture_size != 256:
#            pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range = True)
#        else:
#            pos_interpolated = pos.copy()
#        texture = cv2.remap(image, pos_interpolated[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
#        if args.isMask:
#            vertices_vis = get_visibility(vertices, prn.triangles, h, w)
#            uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
#            uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range = True)
#            texture = texture*uv_mask[:,:,np.newaxis]
#        #write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords/prn.resolution_op)#save 3d face with texture(can open with meshlab)
#    else:
#        True
#        #write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors) #save 3d face(can open with meshlab)
#
#if args.isDepth:
#    depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
#    depth = get_depth_image(vertices, prn.triangles, h, w)
#    #imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
#    #sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth':depth})
#
#if args.isKpt or args.isShow:
#    # get landmarks
#    kpt = prn.get_landmarks(pos)
#    #np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)
#
#if args.isPose or args.isShow:
#    # estimate pose
#    camera_matrix, pose = estimate_pose(vertices)

#write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)

        # render the reconstructed mesh on a 900x900 grid, then flip it
        # vertically (row reversal loop) into display orientation
        rendering_cc = mesh.render.render_grid(save_vertices, prn.triangles,
                                               900, 900)
        a = np.transpose(rendering_cc, axes=[1, 0, 2])
        dim = rendering_cc.shape[0]

        i_t = np.ones([dim, dim, 3], dtype=np.float32)
        for i in range(dim):
            i_t[i] = a[dim - 1 - i]
        i_t = i_t / 255
        #imsave('webcam.png', i_t)

        #kpt = prn.get_landmarks(pos)

        #cv2.imshow('frame', image)
        #cv2.imshow('a',i_t/255)

        #cv2.imshow('sparse alignment', np.concatenate([image, i_t], axis=1))
        cv2.imshow('sparse alignment', i_t)
        cv2.imshow('vedio', image)
        #cv2.imshow('sparse alignment', np.concatenate([plot_kpt(image, kpt), i_t], axis=1))
        #cv2.imshow('dense alignment', plot_vertices(image, vertices))
        #cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))

        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.release()
    cv2.destroyAllWindows()
Exemplo n.º 21
0
import scipy.io as sio
from skimage.io import imread, imsave
from time import time

from api import PRN
from utils.write import write_obj_with_colors

import cv2
from utils.cv_plot import plot_kpt

from PIL import Image
from subprocess import Popen, PIPE

# ---- init PRN
# NOTE(review): `os` is used below but not imported in this snippet —
# presumably imported earlier in the original file.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # GPU number, -1 for CPU
prn = PRN(is_dlib=True)
save_folder = 'TestImages/results'

# webcam capture; cam.get(3)/cam.get(4) are the frame width/height props
cam = cv2.VideoCapture(0)
frame_width = int(cam.get(3))
frame_height = int(cam.get(4))

ind = 0  # frame counter

fps, duration = 24, 100

# MJPG-encoded output video at 24 fps, same size as the camera frames
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                      24, (frame_width, frame_height))

while (True):
    ret_val, img = cam.read()
Exemplo n.º 22
0
import scipy.io as sio
from skimage.io import imread, imsave
import cv2
import os

from api import PRN
import utils.depth_image as DepthImage
import glob
import time
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# dataset locations for the anti-spoofing depth-generation validation set
live_path = "/home/dmp/PRNet-Depth-Generation/val/live"
live_depth_path = "/home/dmp/PRNet-Depth-Generation/val/live_depth"
fake_path = "/home/dmp/PRNet-Depth-Generation/val/fake"

# PRNet without any built-in face detector
prn = PRN(is_dlib=False, is_opencv=False)

img_path = "/home/dmp/Videos/val/fake"
video_paths = glob.glob(img_path + "/*")
# NOTE(review): `videos` is never appended to, so the membership check
# below can never skip anything — confirm whether dedup was intended.
videos = []
for video_path in video_paths:
    print(video_path)
    if video_path in videos:
        continue
    cap = cv2.VideoCapture(video_path)
    frame_number = 0
    while cap.isOpened():
        # sample only every 8th frame
        if frame_number % 8 != 0:
            frame_number += 1
            continue
        ret, image = cap.read()
Exemplo n.º 23
0
from time import time
import sys
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2 as cv
from api import PRN
from utils.write import write_obj_with_colors
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def RotateClockWise90(img):
    """Rotate *img* 90 degrees clockwise.

    Transposing swaps rows and columns; flipping around the vertical axis
    (flipCode=1) then completes the clockwise quarter turn.
    """
    return cv.flip(cv.transpose(img), 1)

# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
prn = PRN(is_dlib = True) 


# ------------- load data
# input images and output directory for the finetuned reconstructions
image_folder = '/media/weepies/Seagate Backup Plus Drive/3DMM/3d-pixel/evaluate_data/image/'
#image_folder = '/home/weepies/3DMM/3DFAW/output/'
save_folder = '/media/weepies/Seagate Backup Plus Drive/3DMM/3dp_chosse/3dpixel/finetune_rec/'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

# collect every .jpg and .png in the input folder
types = ('*.jpg', '*.png')
image_path_list= []
for files in types:
    image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)
Exemplo n.º 24
0
import numpy as np
import os
from glob import glob
import scipy.io as sio
from skimage.io import imread, imsave
from time import time

from api import PRN
from utils.write import write_obj

# ---- init PRN (no face detector: images are assumed pre-cropped)
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
prn = PRN(is_dlib = False) 


# ------------- load data
image_folder = 'TestImages/AFLW2000/'
save_folder = 'TestImages/AFLW2000_results'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

# collect every .jpg and .png in the input folder
types = ('*.jpg', '*.png')
image_path_list= []
for files in types:
    image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)

for i, image_path in enumerate(image_path_list):
    # read image
    image = imread(image_path)
Exemplo n.º 25
0
def main(args):
    """Build phase7 training folders (profile/depth/rs crops) from
    realsense capture folders.

    For each capture folder a new train sub-folder is created (name
    encodes ``mode``), then PRNet is run on every frame; only frames
    whose head pose (Euler angles) differs by more than
    SPATIAL_THRESHOLD_DEGREE from every pose already kept (rtree
    nearest-neighbour query) are saved.

    NOTE(review): relies on module-level names defined elsewhere in this
    file: mode, FAKE/REAL/PRINT, index, p, SPATIAL_THRESHOLD_DEGREE,
    plot_crop, estimate_pose — confirm they are in scope.
    """
    #---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu # GPU number, -1 for CPU
    prn = PRN(is_dlib = args.isDlib)

    if mode == FAKE:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/attack_dataset"
    elif mode == REAL:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/all_dataset"
    elif mode == PRINT:
        dataset_folder_path = "/home/wukong/librealsense/examples/realsense-dataset/all_dataset"

    # Bug fix: generator.next() was removed in Python 3 (this file uses
    # f-strings elsewhere, so it runs on Python 3); use the next() builtin.
    dataset_folder_list = next(os.walk(dataset_folder_path))[1]
    train_folder_path = "/home/wukong/anaconda3/dataset/phase7/train"
    train_folder_list = next(os.walk(train_folder_path))[1]
    for fname in dataset_folder_list:
        source_path = os.path.join(dataset_folder_path, fname)
        hs_warp_folder = os.path.join(source_path, "hs_raw_warp")
        rs_color_folder = os.path.join(source_path, "rs_raw_color")
        rs_depth_folder = os.path.join(source_path, "rs_raw_depth")

        hs_warp_list = sorted(glob(os.path.join(hs_warp_folder, '*.jpg')))
        rs_color_list = sorted(glob(os.path.join(rs_color_folder, '*.jpg')))
        rs_depth_list = sorted(glob(os.path.join(rs_depth_folder, '*.jpg')))

        # skip folders with missing or mismatched image streams
        if not hs_warp_list or not rs_color_list or not rs_depth_list:
            print("skip ", source_path)
            continue
        elif len(hs_warp_list) != len(rs_color_list) or len(hs_warp_list) != len(rs_depth_list):
            print("skip ", source_path)
            continue
        else:
            pass

        # encode capture type / label into the new folder name
        if mode == FAKE:
            new_fname = "1_" + fname[1:] + "_3_1_4"
        elif mode == REAL:
            new_fname = "1_" + fname[1:] + "_1_1_1"
        elif mode == PRINT:
            new_fname = "1_" + fname[1:] + "_0_0_0"

        if new_fname not in train_folder_list:
            new_path = os.path.join(train_folder_path, new_fname)
            os.mkdir(new_path)
            new_depth_path = os.path.join(new_path, "depth")
            os.mkdir(new_depth_path)
            new_profile_path = os.path.join(new_path, "profile")
            os.mkdir(new_profile_path)
            new_rs_path = os.path.join(new_path, "rs")
            os.mkdir(new_rs_path)
            print("create new folder: {}".format(fname))
        else:
            print("skip {}".format(new_fname))
            continue

        # rtree index over (yaw, pitch, roll) of poses kept so far
        spatial_coordinate_idx = index.Index(properties=p)
        count_num = 1
        total_num = len(hs_warp_list)
        for j in range(total_num):
            if j % 10 == 0:
                print("has processed {} of {} images".format(j, total_num))

            hs_warp_image = imread(hs_warp_list[j])
            [h, w, c] = hs_warp_image.shape
            if c>3:
                hs_warp_image = hs_warp_image[:,:,:3]  # drop alpha channel

            # the core: regress position map
            if args.isDlib:
                max_size = max(hs_warp_image.shape[0], hs_warp_image.shape[1])
                if max_size> 1000:
                    hs_warp_image = rescale(hs_warp_image, 1000./max_size)
                    hs_warp_image = (hs_warp_image*255).astype(np.uint8)
                hs_pos = prn.process(hs_warp_image) # use dlib to detect face
            else:
                if hs_warp_image.shape[0] == hs_warp_image.shape[1]:
                    hs_warp_image = resize(hs_warp_image, (256,256))
                    hs_pos = prn.net_forward(hs_warp_image/255.) # input hs_warp_image has been cropped to 256x256
                else:
                    box = np.array([0, hs_warp_image.shape[1]-1, 0, hs_warp_image.shape[0]-1]) # cropped with bounding box
                    hs_pos = prn.process(hs_warp_image, box)

            hs_warp_image = hs_warp_image/255.
            if hs_pos is None:
                continue
            hs_vertices = prn.get_vertices(hs_pos)

            camera_matrix, euler_pose = estimate_pose(hs_vertices)
            # check similarity with previous pose
            hit = spatial_coordinate_idx.nearest((euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]), 1, objects=True)
            hit = [i for i in hit]
            if hit:
                nearest_euler_pose = np.array(hit[0].bbox[:3])
                current_euler_pose = np.array(euler_pose)
                dist = np.linalg.norm(current_euler_pose - nearest_euler_pose)
                if dist > SPATIAL_THRESHOLD_DEGREE:
                    print("Get a new euler pose {}".format(euler_pose))
                    spatial_coordinate_idx.insert(0,(euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]))
                else:
                    # too close to an existing pose: skip this frame
                    continue
            else:
                print("First euler_pose: {}".format(euler_pose))  
                spatial_coordinate_idx.insert(0,(euler_pose[0], euler_pose[1], euler_pose[2], euler_pose[0], euler_pose[1], euler_pose[2]))

            ##############################################
            # save the selected frame's crops per mode
            ##############################################

            if mode == FAKE:
                imsave(os.path.join(new_profile_path, ('%04d' % count_num) + '.jpg'), plot_crop(hs_warp_image, hs_vertices))
                rs_depth_image = imread(rs_depth_list[j])
                imsave(os.path.join(new_depth_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_depth_image, hs_vertices))
            elif mode == PRINT:
                rs_color_image = imread(rs_color_list[j])
                imsave(os.path.join(new_rs_path, ('%04d' % count_num) + '.jpg'), rs_color_image)
            elif mode == REAL:
                rs_color_image = imread(rs_color_list[j])
                [h, w, c] = rs_color_image.shape
                if c>3:
                    rs_color_image = rs_color_image[:,:,:3]

                # the core: regress position map
                if args.isDlib:
                    max_size = max(rs_color_image.shape[0], rs_color_image.shape[1])
                    if max_size> 1000:
                        rs_color_image = rescale(rs_color_image, 1000./max_size)
                        rs_color_image = (rs_color_image*255).astype(np.uint8)
                    rs_pos = prn.process(rs_color_image) # use dlib to detect face
                else:
                    if rs_color_image.shape[0] == rs_color_image.shape[1]:
                        rs_color_image = resize(rs_color_image, (256,256))
                        rs_pos = prn.net_forward(rs_color_image/255.) # input rs_color_image has been cropped to 256x256
                    else:
                        box = np.array([0, rs_color_image.shape[1]-1, 0, rs_color_image.shape[0]-1]) # cropped with bounding box
                        rs_pos = prn.process(rs_color_image, box)

                rs_color_image = rs_color_image/255.
                if rs_pos is None:
                    continue
                rs_vertices = prn.get_vertices(rs_pos)

                rs_depth_image = imread(rs_depth_list[j])
                imsave(os.path.join(new_profile_path, ('%04d' % count_num) + '.jpg'), plot_crop(hs_warp_image, rs_vertices))
                imsave(os.path.join(new_depth_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_depth_image, rs_vertices))
                imsave(os.path.join(new_rs_path, ('%04d' % count_num) + '.jpg'), plot_crop(rs_color_image, rs_vertices))

            count_num += 1
Exemplo n.º 26
0
def main(data_dir):
    """Evaluate PRNet landmark accuracy (mean NME) on the 300W-LP data
    under *data_dir* and print the mean over all batches."""
    # 1) Create Dataset of 300_WLP & Dataloader.
    wlp300 = PRNetDataset(root_dir=data_dir,
                          transform=transforms.Compose([ToTensor(),
                                                        ToNormalize(FLAGS["normalize_mean"], FLAGS["normalize_std"])]))

    wlp300_dataloader = DataLoader(dataset=wlp300, batch_size=FLAGS['batch_size'], shuffle=True, num_workers=0)

    # 2) Intermediate Processing.
    # NOTE(review): transform_img is built but never used below.
    transform_img = transforms.Compose([
        # transforms.ToTensor(),
        transforms.Normalize(FLAGS["normalize_mean"], FLAGS["normalize_std"])
    ])

    # # 3) Create PRNet model.
    # start_epoch, target_epoch = FLAGS['start_epoch'], FLAGS['target_epoch']
    # model = ResFCN256()
    #
    # # Load the pre-trained weight
    # if FLAGS['resume'] and os.path.exists(os.path.join(FLAGS['images'], "3channels.pth")):
    #     state = torch.load(os.path.join(FLAGS['images'], "3channels.pth"))
    #     model.load_state_dict(state['prnet'])
    #     start_epoch = state['start_epoch']
    #     INFO("Load the pre-trained weight! Start from Epoch", start_epoch)
    #
    # model.to("cuda")
    prn = PRN(os.path.join(FLAGS['images'], "3channels.pth"))

    bar = tqdm(wlp300_dataloader)
    nme_list = []
    for i, sample in enumerate(bar):
        uv_map, origin = sample['uv_map'].to(FLAGS['device']), sample['origin'].to(FLAGS['device'])
        # print(origin.shape)
        # Inference.
        # origin = cv2.resize(origin, (256, 256))
        # origin = transform_img(origin)
        # origin = origin.unsqueeze(0)
        # predicted UV position map, rescaled to pixel units and HWC layout
        uv_map_result = prn.net_forward(origin.cuda())
        out = uv_map_result.cpu().detach().numpy()
        uv_map_result = np.squeeze(out)
        cropped_pos = uv_map_result * 255
        uv_map_result = cropped_pos.transpose(1, 2, 0)

        # ground-truth UV map, same rescaling and layout
        out = uv_map.cpu().detach().numpy()
        uv_map = np.squeeze(out)
        cropped_pos = uv_map * 255
        uv_map = cropped_pos.transpose(1, 2, 0)

        # 68 landmarks (x, y) from predicted and ground-truth maps
        kpt_predicted = prn.get_landmarks(uv_map_result)[:, :2]
        kpt_gt = prn.get_landmarks(uv_map)[:, :2]

        # mean normalised per-landmark L2 error
        nme_sum = 0
        for j in range(kpt_gt.shape[0]):
            x = kpt_gt[j][0] - kpt_predicted[j][0]
            y = kpt_gt[j][1] - kpt_predicted[j][1]
            L2_norm = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
            # bounding box size has been fixed to 256x256
            # NOTE(review): NME is conventionally normalised by the bbox
            # size (256) or its diagonal, not the area 256*256 — confirm
            # this divisor is intentional.
            d = 256*256
            error = L2_norm/d
            nme_sum += error
        nme_list.append(nme_sum/68)

    print(np.mean(nme_list))
Exemplo n.º 27
0
parser.add_argument('--isDepth', default=False, type=ast.literal_eval,
                    help='whether to output depth image')
# update in 2017/4/27
parser.add_argument('--isTexture', default=False, type=ast.literal_eval,
                    help='whether to save texture in obj file')
parser.add_argument('--isMask', default=False, type=ast.literal_eval,
                    help='whether to set invisible pixels(due to self-occlusion) in texture as 0')
# update in 2017/7/19
parser.add_argument('--texture_size', default=256, type=int,
                    help='size of texture map, default is 256. need isTexture is True')

args = parser.parse_args()

# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
prn = PRN(is_dlib = args.isDlib)


# ------------- load data
# Bug fix: image_folder was commented out but is used by the glob loop
# below, which raised NameError at import time. Restore the assignment.
image_folder = 'TestImages/AFLW2000/'
save_folder = 'TestImages/AFLW2000_results'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

# collect every .jpg and .png in the input folder
types = ('*.jpg', '*.png')
image_path_list= []
for files in types:
    image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)

# -------------- prepare cv
Exemplo n.º 28
0
def main(args):
    """Run PRNet on every .jpg/.png in args.inputFolder and write the
    requested outputs (.obj mesh, landmarks, pose, visualisation) into
    args.outputFolder, driven by the boolean flags on ``args``."""
    if args.isShow:
        args.isOpencv = True
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box
    if args.isObj:
        from utils.write import write_obj
    if args.isPose:
        from utils.estimate_pose import estimate_pose

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU

    prn = PRN(is_dlib=args.isDlib, is_opencv=args.isOpencv)

    # ------------- load data
    image_folder = args.inputFolder
    save_folder = args.outputFolder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        # file name without directory or extension
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)

        # the core: regress position map
        pos = prn.process(image)  # use dlib to detect face
        # Bug fix: prn.process returns None when no face is detected; the
        # original fell straight through to prn.get_vertices(None) and
        # crashed. Guard matches the other pipelines in this file.
        if pos is None:
            continue

        if args.isObj or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            # corresponding colors
            colors = prn.get_colors(image, vertices)
            write_obj(os.path.join(save_folder,
                                   name + '.obj'), vertices, colors,
                      prn.triangles)  #save 3d face(can open with meshlab)

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)

        if args.isShow:
            # ---------- Plot (the unused image_pose intermediate was
            # removed; plot_pose_box is called directly in imshow below)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
Exemplo n.º 29
0
import numpy as np
import os
from glob import glob
import scipy.io as sio
from skimage.io import imread, imsave
from time import time

from api import PRN
from utils.write import write_obj

# ---- init PRN
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # GPU number, -1 for CPU
prn = PRN(is_dlib=True)

# ------------- load data
image_folder = 'TestImages/LukePics/'
save_folder = 'TestImages/LukePics_results'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

types = ('*.jpg', '*.png')
image_path_list = []
for files in types:
    image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)

for i, image_path in enumerate(image_path_list):
    # read image
    image = imread(image_path)

    # the core: regress position map
from api import PRN
import utils.depth_image as DepthImage
#import numpy as np
import os
from skimage.io import imread, imsave
from skimage.transform import estimate_transform, warp
from time import time
from PIL import Image

import cv2
from predictor import PosPrediction

os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # pin to GPU #2

# PRN regresses a UV position map from a face image; dlib detection enabled,
# OpenCV-based detection disabled
prn = PRN(is_dlib=True, is_opencv=False)
prefix = '.'  # root directory for locating the dlib model file below

# Load dlib's CNN face detector model.  The `if True:` guard appears to be a
# toggle left in place so the detector load can be disabled without deleting
# the code.
if True:
    import dlib
    detector_path = os.path.join(prefix,
                                 'Data/net-data/mmod_human_face_detector.dat')
    face_detector = dlib.cnn_face_detection_model_v1(detector_path)


def dlib_detect(image):
    """Detect faces in `image` using the module-level dlib CNN detector.

    The second argument (1) asks dlib to upsample the image once before
    detection, which helps find smaller faces.
    """
    upsample_count = 1
    detections = face_detector(image, upsample_count)
    return detections


# ------------- load data
# directory holding the input test images (snippet is truncated after this)
image_folder = '/home/chang/dataset/B_test'
Exemplo n.º 31
0
def main(args):
    """Run PRNet over every image in ``args.inputDir`` and write the
    requested artifacts to ``args.outputDir``.

    Boolean flags on ``args`` select the outputs: 3D mesh (.obj, optionally
    textured/masked), depth image + .mat, mesh .mat, 68 landmarks (.txt),
    pose (.txt) and camera matrix, and interactive display windows.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    if os.path.isfile(image_folder):
        # a single image file was passed instead of a directory
        image_path_list.append(image_folder)
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)

    for i, image_path in enumerate(image_path_list):

        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, _] = image.shape

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                # keep the detector input small; rescale returns floats in
                # [0, 1], so convert back to uint8
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos, crop_image = prn.process(image)  # use dlib to detect face
        else:
            # BUGFIX: compare height with width (shape[0] vs shape[1]);
            # the original compared width with the channel count (shape[2]),
            # so square images never took the direct net_forward path.
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(
                    image / 255.)  # input image has been cropped to 256x256
                crop_image = None
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1
                                ])  # cropped with bounding box
                pos, crop_image = prn.process(image, box)

        image = image / 255.
        if pos is None:
            continue  # no face found in this image

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # flip y so the saved mesh uses image coordinates
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        if args.isImage and crop_image is not None:
            imsave(os.path.join(save_folder, name + '_crop.jpg'), crop_image)
            imsave(os.path.join(save_folder, name + '_orig.jpg'), image)

        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if args.isTexture:
                texture = cv2.remap(image,
                                    pos[:, :, :2].astype(np.float32),
                                    None,
                                    interpolation=cv2.INTER_NEAREST,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=(0))
                if args.isMask:
                    # zero out texels that are not visible from the camera
                    vertices_vis = get_visibility(vertices, prn.triangles, h,
                                                  w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles,
                                          prn.uv_coords, h, w,
                                          prn.resolution_op)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(
                    os.path.join(save_folder,
                                 name + '.obj'), save_vertices, colors,
                    prn.triangles, texture, prn.uv_coords / prn.resolution_op
                )  #save 3d face with texture(can open with meshlab)
            else:
                write_obj(os.path.join(save_folder,
                                       name + '.obj'), save_vertices, colors,
                          prn.triangles)  #save 3d face(can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'),
                        {'depth': depth})

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {
                'vertices': vertices,
                'colors': colors,
                'triangles': prn.triangles
            })

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose (the duplicate _pose.txt savetxt was removed)
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'),
                       camera_matrix)

        if args.isShow:
            # ---------- Plot
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            # reuse the already-computed pose visualization
            cv2.imshow('pose', image_pose)
            if crop_image is not None:
                cv2.imshow('crop', crop_image)
            cv2.waitKey(0)
Exemplo n.º 32
0
def get_3d_single(video_path=None,
                  target_id=None,
                  img_path=None,
                  device_id='3'):
    """Extract a 3D face mesh and 68 landmarks for every frame of a video.

    For each frame i the following files are written next to the video:
      * <video>_%05d.png           the raw frame
      * <video>_%05d_prnet.npy     68 2D landmarks (y flipped, 224-based)
      * <video>_%05d_original.obj  colored mesh (viewable in meshlab)

    Note: `target_id` is immediately overwritten with the total frame count
    (preserving the original behavior), so every frame is processed.
    When `video_path` is None, output paths are derived from `img_path`.
    `device_id` selects the CUDA device ('-1' for CPU).
    """
    # ---- init PRN
    target_id = count_frames(video_path)  # process every frame of the video
    os.environ['CUDA_VISIBLE_DEVICES'] = device_id  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)

    # BUGFIX: `target_path` was used throughout the loop but never defined,
    # raising NameError on the first iteration.  Derive it from the inputs.
    target_path = video_path if video_path is not None else img_path

    cap = cv2.VideoCapture(video_path)
    try:
        for i in range(target_id):
            ret, frame = cap.read()
            if not ret:
                break  # ran out of frames earlier than expected
            print(target_path[:-4] + '_%05d.png' % i)
            cv2.imwrite(target_path[:-4] + '_%05d.png' % i, frame)
            # OpenCV decodes BGR; PRN expects RGB
            target_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = target_frame

            [h, w, c] = image.shape

            pos = prn.process(image)
            if pos is None:
                continue  # no face detected in this frame
            image = image / 255

            kpt = prn.get_landmarks(pos)
            kpt[:, 1] = 224 - kpt[:, 1]  # flip y into the 224-based frame
            if video_path is not None:
                print(target_path[:-4] + '_%05d' % i + '_prnet.npy')
                np.save(target_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)
            else:
                np.save(img_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)

            vertices = prn.get_vertices(pos)
            save_vertices = vertices.copy()
            # flip y so the saved mesh uses image coordinates
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if video_path is not None:
                write_obj_with_colors(
                    target_path[:-4] + '_%05d' % i + '_original.obj',
                    save_vertices, prn.triangles,
                    colors)  # save 3d face(can open with meshlab)
            else:
                write_obj_with_colors(
                    img_path[:-4] + '_%05d' % i + '_original.obj',
                    save_vertices,
                    prn.triangles, colors)  # save 3d face(can open with meshlab)
    finally:
        cap.release()  # BUGFIX: the capture handle was never released