def predict(self, img_path):
    img = Image.open(img_path)
    lm = get_5_face_landmarks(np.array(img))
    input_img, lm_new, transform_params = Preprocess(img, lm, self.lm3D)
    with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
        images = tf.placeholder(name='input_imgs',
                                shape=[None, 224, 224, 3],
                                dtype=tf.float32)
        tf.import_graph_def(self.graph_def,
                            name='resnet',
                            input_map={'input_imgs:0': images})
        coeff = graph.get_tensor_by_name('resnet/coeff:0')
        with tf.Session() as sess:
            coef = sess.run(coeff, feed_dict={images: input_img})
            # reconstruct 3D face with output coefficients and face model
            face_shape, face_texture, face_color, tri, face_projection, z_buffer, landmarks_2d = Reconstruction(
                coef, self.facemodel)
            # reshape outputs
            input_img = np.squeeze(input_img)
            shape = np.squeeze(face_shape, axis=0)
            color = np.squeeze(face_color, axis=0)
            landmarks_2d = np.squeeze(landmarks_2d, axis=0)
    return shape, color
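A minimal usage sketch for the method above; the class name FacePredictor, its constructor behavior, and the image path are assumptions, not from the source (the owning class is expected to load self.graph_def, self.lm3D and self.facemodel beforehand):

    predictor = FacePredictor()                   # hypothetical wrapper class
    shape, color = predictor.predict('face.png')  # placeholder path
    print(shape.shape, color.shape)               # per-vertex positions and colors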
Example #2
def demo_19news(n1, n2):
    lm3D = load_lm3d()
    for n in range(n1, n2):
        start = 0
        file = os.path.join('../Data', str(n), 'frame%d.png' % start)
        if not os.path.exists(file[:-4] + '.txt'):
            continue
        img, lm = load_img(file, file[:-4] + '.txt')
        input_img, lm_new, transform_params = Preprocess(img, lm,
                                                         lm3D)  # lm_new 5x2
        input_img = np.squeeze(input_img)
        img1 = Image.fromarray(input_img[:, :, ::-1])

        # scale = average of the eye-distance and mouth-width ratios between
        # the original landmarks and the cropped ones
        scale = (0.5 * (lm[0][0] - lm[1][0]) / (lm_new[0][0] - lm_new[1][0]) +
                 0.5 * (lm[3][0] - lm[4][0]) / (lm_new[3][0] - lm_new[4][0]))
        trans = np.mean(lm - lm_new * scale, axis=0)
        trans = np.round(trans).astype(np.int32)
        w, h = img1.size
        w2 = int(round(w * scale))
        h2 = int(round(h * scale))
        img1 = img1.resize((w2, h2), resample=Image.LANCZOS)
        img.paste(img1, (trans[0], trans[1], trans[0] + img1.size[0],
                         trans[1] + img1.size[1]))
        np.save(os.path.join('../Data', str(n), 'transbig.npy'),
                np.array([w2, h2, trans[0], trans[1]]))
        img.save('combine.png')
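A quick sanity check for the transform recovered above: scale and trans are the similarity parameters that map the cropped landmarks lm_new back onto the originals lm, so the round trip should leave only a small residual. A minimal sketch, assuming lm and lm_new are the 5x2 arrays from the code above:

    # the recovered similarity transform should map the cropped landmarks
    # back onto the original ones; expect a residual of a few pixels at most
    lm_back = lm_new * scale + trans
    print(np.abs(lm_back - lm).max())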
Example #3
def demo():
	# input and output folder
	image_path = 'input'
	save_path = 'output'	
	img_list = glob.glob(image_path + '/' + '*.png')

	# read BFM face model
	# transfer original BFM model to our model
	if not os.path.isfile('./BFM/BFM_model_front.mat'):
		transferBFM09()

	# read face model
	facemodel = BFM()
	# read standard landmarks for preprocessing images
	lm3D = load_lm3d()
	n = 0

	# build reconstruction model
	with tf.Graph().as_default() as graph,tf.device('/cpu:0'):

		images = tf.placeholder(name = 'input_imgs', shape = [None,224,224,3], dtype = tf.float32)
		graph_def = load_graph('network/FaceReconModel.pb')
		tf.import_graph_def(graph_def,name='resnet',input_map={'input_imgs:0': images})

		# output coefficients of R-Net (dim = 257) 
		coeff = graph.get_tensor_by_name('resnet/coeff:0')

		with tf.Session() as sess:
			print('reconstructing...')
			for file in img_list:
				n += 1
				print(n)
				# load images and corresponding 5 facial landmarks
				img,lm = load_img(file,file.replace('png','txt'))
				# preprocess input image
				input_img,lm_new,transform_params = Preprocess(img,lm,lm3D)

				coef = sess.run(coeff,feed_dict = {images: input_img})

				# reconstruct 3D face with output coefficients and face model
				face_shape,face_texture,face_color,tri,face_projection,z_buffer,landmarks_2d = Reconstruction(coef,facemodel)

				# reshape outputs
				input_img = np.squeeze(input_img)
				shape = np.squeeze(face_shape, (0))
				color = np.squeeze(face_color, (0))
				landmarks_2d = np.squeeze(landmarks_2d, (0))

				# save output files
				# cropped image, which is the direct input to our R-Net
				# 257 dim output coefficients by R-Net
				# 68 face landmarks of cropped image
				savemat(os.path.join(save_path, file.split('/')[-1].replace('.png', '.mat')),
						{'cropped_img': input_img[:, :, ::-1], 'coeff': coef,
						 'landmarks_2d': landmarks_2d, 'lm_5p': lm_new})
				save_obj(os.path.join(save_path, file.split('/')[-1].replace('.png', '_mesh.obj')),
						shape, tri, np.clip(color, 0, 255) / 255)  # 3D reconstructed face (in canonical view)
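The load_img calls above pair each image with a .txt file holding its 5 facial landmarks. A sketch of producing such a file with MTCNN, mirroring the commented-out detector block in the GUI example further below (the file paths are placeholders):

    # write the 5-point landmark file load_img expects: one "x y" pair per
    # line, ordered left eye, right eye, nose, mouth left, mouth right
    from mtcnn import MTCNN
    import cv2

    detector = MTCNN()
    image = cv2.cvtColor(cv2.imread('input/face.png'), cv2.COLOR_BGR2RGB)
    keypoints = detector.detect_faces(image)[0]['keypoints']
    with open('input/face.txt', 'w') as f:
        for k in ('left_eye', 'right_eye', 'nose', 'mouth_left', 'mouth_right'):
            x, y = keypoints[k]
            f.write('%d %d\n' % (x, y))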
Example #4
def demo(args):
    # input and output folder
    image_path = args.input
    save_path = args.output
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # read BFM face model
    facemodel = BFM()
    # transfer original BFM model to our model
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # read standard landmarks for preprocessing images
    lm3D = load_lm3d()
    batchsize = 1

    # build reconstruction model
    with tf.Graph().as_default() as graph, tf.device('/gpu:0'):

        FaceReconstructor = Face3D()
        images = tf.placeholder(name='input_imgs',
                                shape=[batchsize, 224, 224, 3],
                                dtype=tf.float32)
        graph_def = load_graph('network/FaceReconModel.pb')
        tf.import_graph_def(graph_def,
                            name='resnet',
                            input_map={'input_imgs:0': images})

        # output coefficients of R-Net (dim = 257)
        coeff = graph.get_tensor_by_name('resnet/coeff:0')

        with tf.device('/cpu:0'):
            # renderer layer
            faceshaper = tf.placeholder(name="face_shape_r",
                                        shape=[1, 35709, 3],
                                        dtype=tf.float32)
            facenormr = tf.placeholder(name="face_norm_r",
                                       shape=[1, 35709, 3],
                                       dtype=tf.float32)
            facecolor = tf.placeholder(name="face_color",
                                       shape=[1, 35709, 3],
                                       dtype=tf.float32)
            rendered = Render_layer(faceshaper, facenormr, facecolor,
                                    facemodel, 1)

            rstimg = tf.placeholder(name='rstimg',
                                    shape=[224, 224, 4],
                                    dtype=tf.uint8)
            encode_png = tf.image.encode_png(rstimg)

        # reconstructing faces
        FaceReconstructor.Reconstruction_Block(coeff, batchsize)
        face_shape = FaceReconstructor.face_shape_t
        face_texture = FaceReconstructor.face_texture
        face_color = FaceReconstructor.face_color
        landmarks_2d = FaceReconstructor.landmark_p
        recon_img = FaceReconstructor.render_imgs
        tri = FaceReconstructor.facemodel.face_buf

        # MTCNN Detector
        detector = MTCNN()
        img, lm = load_img_and_lm(image_path, detector)

        with tf.Session() as sess:
            print('reconstructing...')
            # load images and corresponding 5 facial landmarks

            # preprocess input image
            input_img, lm_new, transform_params, position = Preprocess(
                img, lm, lm3D)

            coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, recon_img_, tri_ = sess.run(
                [coeff, face_shape, face_texture, face_color, landmarks_2d, recon_img, tri],
                feed_dict={images: input_img})

            # renderer output (kept distinct from the graph tensors above)
            face_shape_r, face_norm_r, face_color_r, tri_r = Reconstruction_for_render(
                coeff_, facemodel)
            final_images = sess.run(rendered,
                                    feed_dict={
                                        faceshaper: face_shape_r.astype('float32'),
                                        facenormr: face_norm_r.astype('float32'),
                                        facecolor: face_color_r.astype('float32')
                                    })
            result_image = final_images[0, :, :, :]
            result_image = np.clip(result_image, 0., 1.).copy(order='C')
            # save renderer output
            result_bytes = sess.run(encode_png, {rstimg: result_image * 255.0})
            result_output_path = os.path.join(
                save_path,
                image_path.split(os.path.sep)[-1].replace(
                    '.png', '_render.png').replace('.jpg', '_render.png'))
            with open(result_output_path, 'wb') as output_file:
                output_file.write(result_bytes)

            # get RGB image from RGBA
            rgb_renderer_img, mask = RGBA2RGB(result_image)
            # Paste the 3D rendered image back to the original image
            renderer_3D_input_img = np.copy(img)
            left0 = int(position[0] * position[4])
            right0 = int(position[1] * position[4])
            up0 = int(position[2] * position[4])
            below0 = int(position[3] * position[4])
            rgb_renderer_img = cv2.resize(rgb_renderer_img,
                                          (right0 - left0, below0 - up0))
            mask = cv2.resize(mask, (right0 - left0, below0 - up0))
            mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
            mask = mask.astype('uint32')
            if left0 < 0:
                mask = mask[:, -left0:]
                rgb_renderer_img = rgb_renderer_img[:, -left0:]
                left0 = 0
            if up0 < 0:
                mask = mask[-up0:, :]
                rgb_renderer_img = rgb_renderer_img[-up0:, :]
                up0 = 0
            if right0 > renderer_3D_input_img.shape[1]:
                mask = mask[:, :-(right0 - renderer_3D_input_img.shape[1])]
                rgb_renderer_img = rgb_renderer_img[:, :-(
                    right0 - renderer_3D_input_img.shape[1])]
                right0 = renderer_3D_input_img.shape[1]
            if below0 > renderer_3D_input_img.shape[0]:
                mask = mask[:-(below0 - renderer_3D_input_img.shape[0]), :]
                rgb_renderer_img = rgb_renderer_img[:-(
                    below0 - renderer_3D_input_img.shape[0]), :]
                below0 = renderer_3D_input_img.shape[0]

            renderer_3D_input_img[
                up0:below0, left0:right0] = renderer_3D_input_img[
                    up0:below0, left0:right0] * mask + rgb_renderer_img
            renderer_3D_input_img = cv2.cvtColor(renderer_3D_input_img,
                                                 cv2.COLOR_BGR2RGB)
            cv2.imwrite(
                os.path.join(
                    save_path,
                    image_path.split(os.path.sep)[-1].replace(
                        '.png', '_renderer_in_original.png').replace(
                            '.jpg', '_renderer_in_original.png')),
                renderer_3D_input_img)

            # reshape outputs
            input_img = np.squeeze(input_img)
            face_shape_ = np.squeeze(face_shape_, (0))
            face_texture_ = np.squeeze(face_texture_, (0))
            face_color_ = np.squeeze(face_color_, (0))
            landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
            if not is_windows:
                recon_img_ = np.squeeze(recon_img_, (0))

            # save output files
            if not is_windows:
                savemat(os.path.join(save_path,
                                     image_path.split(os.path.sep)[-1].replace(
                                         '.png', '.mat').replace('.jpg', '.mat')),
                        {'cropped_img': input_img[:, :, ::-1], 'recon_img': recon_img_,
                         'coeff': coeff_, 'face_shape': face_shape_,
                         'face_texture': face_texture_, 'face_color': face_color_,
                         'lm_68p': landmarks_2d_, 'lm_5p': lm_new})
            save_obj(
                os.path.join(
                    save_path,
                    image_path.split(os.path.sep)[-1].replace(
                        '.png', '_mesh.obj').replace('.jpg', '_mesh.obj')),
                face_shape_, tri_,
                np.clip(face_color_, 0, 255) / 255)  # 3D reconstructed face (in canonical view)
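The paste-back above relies on a helper RGBA2RGB that splits the rendered RGBA frame into an RGB foreground layer and a background-keep mask (the original pixels are multiplied by the mask, then the rendered RGB is added). A minimal sketch under that assumption; the repository's actual helper may differ:

    import numpy as np

    def RGBA2RGB_sketch(rgba):
        # rgba: rendered float image in [0, 1], shape (224, 224, 4)
        alpha = rgba[..., 3:4]
        rgb = (rgba[..., :3] * alpha * 255.0).astype(np.uint8)  # premultiplied foreground
        mask = (alpha[..., 0] < 0.5).astype(np.uint8)           # 1 where the original shows through
        return rgb, mask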
Example #5
def demo():
    # input and output folder
    in_dir = 'input_vids'
    out_dir = 'output2'
    # img_list = glob.glob(image_path + '/' + '*.jpg')
    vid_list = [
        os.path.join(in_dir, f) for f in os.listdir(in_dir)
        if not f.startswith('.')
    ]

    # read BFM face model
    # transfer original BFM model to our model
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # read face model
    facemodel = BFM()
    # read standard landmarks for preprocessing images
    lm3D = load_lm3d()
    n = 0

    # build reconstruction model
    with tf.Graph().as_default() as graph, tf.device('/cpu:0'):

        images = tf.placeholder(name='input_imgs',
                                shape=[None, 224, 224, 3],
                                dtype=tf.float32)
        graph_def = load_graph('network/FaceReconModel.pb')
        tf.import_graph_def(graph_def,
                            name='resnet',
                            input_map={'input_imgs:0': images})

        # output coefficients of R-Net (dim = 257)
        coeff = graph.get_tensor_by_name('resnet/coeff:0')

        with tf.Session() as sess:
            print('reconstructing...')
            for file in vid_list:
                print(file)
                with iio.get_reader(file) as reader:
                    fps = reader.get_meta_data()['fps']
                    name, ext = os.path.splitext(file)
                    file_name = os.path.basename(name)
                    l_writer = iio.get_writer(os.path.join(
                        out_dir, file_name + ext),
                                              fps=fps)
                    # r_writer = iio.get_writer(os.path.join(out_dir, file_name + '_render' + ext), fps=fps)
                    for i, im in enumerate(reader):
                        print(i)
                        try:
                            # load images and corresponding 5 facial landmarks
                            # img,lm = load_img(file,file.replace('png','txt'))
                            img = Image.fromarray(im)
                            np_img = np.array(img)
                            lm = get_landmarks(np_img)
                            h, w = np_img.shape[:2]

                            # preprocess input image
                            input_img, lm_new, transform_params = Preprocess(
                                img, lm, lm3D)
                            s = transform_params[2]
                            out_sh = int(np.round(224 / s))
                            out_sh = min(out_sh, min(w, h))

                            coef = sess.run(coeff,
                                            feed_dict={images: input_img})

                            # reconstruct 3D face with output coefficients and face model
                            face_shape, face_texture, face_color, tri, face_projection, z_buffer, landmarks_2d, translation, rotation, projection = Reconstruction(
                                coef, facemodel)

                            # reshape outputs
                            input_img = np.squeeze(input_img)
                            shape = np.squeeze(face_shape, (0))
                            color = np.squeeze(face_color, (0))
                            landmarks_2d = np.squeeze(landmarks_2d, (0))

                            cx = transform_params[3][0]
                            cy = transform_params[4][0]
                            tx, ty = -(w / 2 - cx), -(cy - h / 2)

                            land_im = np_img.copy()
                            for x, y in landmarks_2d:
                                # map crop coordinates back to full-frame coordinates
                                x = int(np.round((x + (w * s - 224) // 2) / s + tx))
                                y = int(np.round((y + (h * s - 224) // 2) / s + ty))
                                cv2.circle(land_im, (x, y), 2, (0, 255, 0), -1)

                            trans_mat = np.float32([[1, 0, tx], [0, 1, ty]])

                            rendered = renderer.render(
                                shape, color / 255,
                                np.squeeze(tri.astype(np.int32) - 1), projection,
                                rotation, translation, (out_sh, out_sh))

                            out = np.zeros((h, w, 4), dtype=np_img.dtype)
                            oo = out_sh // 2
                            print(out_sh, oo, rendered.shape, out.shape)
                            out[h // 2 - oo:h // 2 + oo + out_sh % 2, w // 2 -
                                oo:w // 2 + oo + out_sh % 2, :] = rendered
                            im_trans = cv2.warpAffine(out, trans_mat, (w, h))

                            # hard 0/1 alpha mask from the rendered layer
                            alpha = (im_trans[..., 3] / 255).astype(np.uint8)
                            rendered = (im_trans[..., :3] * alpha[..., np.newaxis] +
                                        np_img * (1 - alpha[..., np.newaxis]))

                            out_im = np.hstack([np_img, rendered, land_im])
                            l_writer.append_data(out_im)

                            # save output files
                            # cropped image, which is the direct input to our R-Net
                            # 257 dim output coefficients by R-Net
                            # 68 face landmarks of cropped image
                            # savemat(os.path.join(save_path,os.path.basename(file).replace('.jpg','.mat')),{'cropped_img':input_img[:,:,::-1],'coeff':coef,'landmarks_2d':landmarks_2d,'lm_5p':lm_new})
                            # save_obj(os.path.join(save_path,os.path.basename(file).replace('.jpg','_mesh.obj')),shape,tri,np.clip(color,0,255)/255) # 3D reconstruction face (in canonical view)
                        except Exception:
                            l_writer.append_data(np.hstack([im] * 3))
                            traceback.print_exc()
                    l_writer.close()  # finalize the output video
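The landmark loop above inverts the Preprocess crop: the frame is scaled by s, a 224x224 window is center-cropped, and the face center is translated by (tx, ty), so mapping a point back divides out the scale and undoes both offsets. The same math as a small helper (the name crop_to_frame is mine, not from the source):

    def crop_to_frame(x, y, w, h, s, tx, ty):
        # invert the scale-then-center-crop performed by Preprocess
        x = (x + (w * s - 224) // 2) / s + tx
        y = (y + (h * s - 224) // 2) / s + ty
        return int(np.round(x)), int(np.round(y))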
def demo(image_path):
	# input and output folder
	save_path = 'output/coeff'	
	save_path2 = 'output/render'
	if image_path[-1] == '/':
		image_path = image_path[:-1]
	name = os.path.basename(image_path)
	print(image_path, name)
	img_list = glob.glob(image_path + '/' + '*.txt')
	img_list = [e[:-4]+'.png' for e in img_list]
	# resume support: skip frames that already have a saved coefficient .mat
	already = glob.glob(save_path + '/' + name + '/*.mat')
	already = [e[len(save_path)+1:-4].replace(name, image_path) + '.png' for e in already]
	img_list = sorted(set(img_list).difference(already))
	print('img_list len:', len(img_list))
	if not os.path.exists(os.path.join(save_path,name)):
		os.makedirs(os.path.join(save_path,name))
	if not os.path.exists(os.path.join(save_path2,name)):
		os.makedirs(os.path.join(save_path2,name))

	# read BFM face model
	# transfer original BFM model to our model
	if not os.path.isfile('./BFM/BFM_model_front.mat'):
		transferBFM09()

	# read face model
	facemodel = BFM()
	# read standard landmarks for preprocessing images
	lm3D = load_lm3d()
	n = 0
	t1 = time.time()

	# build reconstruction model
	with tf.Graph().as_default() as graph:

		images = tf.placeholder(name = 'input_imgs', shape = [None,224,224,3], dtype = tf.float32)
		graph_def = load_graph('network/FaceReconModel.pb')
		tf.import_graph_def(graph_def,name='resnet',input_map={'input_imgs:0': images})

		# output coefficients of R-Net (dim = 257) 
		coeff = graph.get_tensor_by_name('resnet/coeff:0')

		faceshaper = tf.placeholder(name = "face_shape_r", shape = [1,35709,3], dtype = tf.float32)
		facenormr = tf.placeholder(name = "face_norm_r", shape = [1,35709,3], dtype = tf.float32)
		facecolor = tf.placeholder(name = "face_color", shape = [1,35709,3], dtype = tf.float32)
		rendered = Render_layer(faceshaper,facenormr,facecolor,facemodel,1)

		rstimg = tf.placeholder(name = 'rstimg', shape = [224,224,4], dtype=tf.uint8)
		encode_png = tf.image.encode_png(rstimg)

		with tf.Session() as sess:
			print('reconstructing...')
			for file in img_list:
				n += 1
				# load images and corresponding 5 facial landmarks
				if '_mtcnn' not in image_path:
					img,lm = load_img(file,file[:-4]+'.txt')
				else:
					img,lm = load_img(file,file[:-4].replace(name,name+'_mtcnn')+'.txt')
				file = file.replace(image_path.replace('_mtcnn',''), name)
				# preprocess input image
				input_img,lm_new,transform_params = Preprocess(img,lm,lm3D)
				if n == 1:
					transform_firstframe = transform_params
				input_img2,lm_new2 = Preprocess2(img,lm,transform_firstframe)

				coef = sess.run(coeff,feed_dict = {images: input_img})
				
				face_shape_r,face_norm_r,face_color,tri = Reconstruction_for_render(coef,facemodel)
				final_images = sess.run(rendered, feed_dict={faceshaper: face_shape_r.astype('float32'), facenormr: face_norm_r.astype('float32'), facecolor: face_color.astype('float32')})
				result_image = final_images[0, :, :, :]
				result_image = np.clip(result_image, 0., 1.).copy(order='C')
				result_bytes = sess.run(encode_png,{rstimg: result_image*255.0})
				result_output_path = os.path.join(save_path2,file[:-4]+'_render.png')
				with open(result_output_path, 'wb') as output_file:
					output_file.write(result_bytes)

				# reshape outputs
				input_img = np.squeeze(input_img)
				im = Image.fromarray(input_img[:,:,::-1])
				cropped_output_path = os.path.join(save_path2,file[:-4]+'.png')
				im.save(cropped_output_path)

				input_img2 = np.squeeze(input_img2)
				im = Image.fromarray(input_img2[:,:,::-1])
				cropped_output_path = os.path.join(save_path2,file[:-4]+'_input2.png')
				im.save(cropped_output_path)

				# save output files
				savemat(os.path.join(save_path,file[:-4]+'.mat'),{'coeff':coef,'lm_5p':lm_new2-lm_new})
	t2 = time.time()
	print('Total n:', n, 'Time:', t2-t1)
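This variant keys every frame's crop to the transform recovered from the first frame (Preprocess2), so crops stay spatially aligned across the whole video, and it saves lm_new2 - lm_new, the per-frame landmark drift relative to that fixed crop. A condensed sketch of the pattern, with frame_paths as a hypothetical list of frame image paths:

    transform_ref = None
    for path in frame_paths:
        img, lm = load_img(path, path[:-4] + '.txt')
        input_img, lm_new, params = Preprocess(img, lm, lm3D)
        if transform_ref is None:
            transform_ref = params  # lock the crop to the first frame
        input_img2, lm_new2 = Preprocess2(img, lm, transform_ref)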
def demo():
    # input and output folder
    image_path = 'input'
    save_path = 'output'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    img_list = glob.glob(image_path + '/' + '*.png')
    img_list += glob.glob(image_path + '/' + '*.jpg')

    # read BFM face model
    # transfer original BFM model to our model
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # read standard landmarks for preprocessing images
    lm3D = load_lm3d()
    batchsize = 1
    n = 0

    # build reconstruction model
    with tf.Graph().as_default() as graph, tf.device('/cpu:0'):

        FaceReconstructor = Face3D()
        images = tf.placeholder(name='input_imgs',
                                shape=[batchsize, 224, 224, 3],
                                dtype=tf.float32)
        graph_def = load_graph('network/FaceReconModel.pb')
        tf.import_graph_def(graph_def,
                            name='resnet',
                            input_map={'input_imgs:0': images})

        # output coefficients of R-Net (dim = 257)
        coeff = graph.get_tensor_by_name('resnet/coeff:0')

        # reconstructing faces
        FaceReconstructor.Reconstruction_Block(coeff, batchsize)
        face_shape = FaceReconstructor.face_shape_t
        face_texture = FaceReconstructor.face_texture
        face_color = FaceReconstructor.face_color
        landmarks_2d = FaceReconstructor.landmark_p
        recon_img = FaceReconstructor.render_imgs
        tri = FaceReconstructor.facemodel.face_buf

        with tf.Session() as sess:
            print('reconstructing...')
            for file in img_list:
                n += 1
                print(n)
                # load images and corresponding 5 facial landmarks
                img, lm = load_img(
                    file,
                    file.replace('png', 'txt').replace('jpg', 'txt'))
                # preprocess input image
                input_img, lm_new, transform_params = Preprocess(img, lm, lm3D)

                coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, recon_img_, tri_ = sess.run(
                    [coeff, face_shape, face_texture, face_color, landmarks_2d, recon_img, tri],
                    feed_dict={images: input_img})

                # reshape outputs
                input_img = np.squeeze(input_img)
                face_shape_ = np.squeeze(face_shape_, (0))
                face_texture_ = np.squeeze(face_texture_, (0))
                face_color_ = np.squeeze(face_color_, (0))
                landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
                if not is_windows:
                    recon_img_ = np.squeeze(recon_img_, (0))

                # save output files
                if not is_windows:
                    savemat(os.path.join(save_path,
                                         file.split(os.path.sep)[-1].replace(
                                             '.png', '.mat').replace('.jpg', '.mat')),
                            {'cropped_img': input_img[:, :, ::-1], 'recon_img': recon_img_,
                             'coeff': coeff_, 'face_shape': face_shape_,
                             'face_texture': face_texture_, 'face_color': face_color_,
                             'lm_68p': landmarks_2d_, 'lm_5p': lm_new})
                save_obj(
                    os.path.join(
                        save_path,
                        file.split(os.path.sep)[-1].replace(
                            '.png', '_mesh.obj').replace('.jpg', '_mesh.obj')),
                    face_shape_, tri_,
                    np.clip(face_color_, 0, 255) / 255)  # 3D reconstructed face (in canonical view)
def demo(image_path):
	# output folder
	save_dir = 'output/coeff'
	save_coeff_path = save_dir + '/' + image_path
	img_list = glob.glob(image_path + '/*.txt')
	img_list = img_list + glob.glob(image_path + '/*/*.txt')
	img_list = img_list + glob.glob(image_path + '/*/*/*.txt')
	img_list = img_list + glob.glob(image_path + '/*/*/*/*.txt')
	img_list = [e[:-4]+'.jpg' for e in img_list]
	already = glob.glob(save_coeff_path + '/*.mat')
	already = already + glob.glob(save_coeff_path + '/*/*.mat')
	already = already + glob.glob(save_coeff_path + '/*/*/*.mat')
	already = already + glob.glob(save_coeff_path + '/*/*/*/*.mat')
	already = [e[len(save_dir)+1:-4]+'.jpg' for e in already]
	# resume support: process only images without a saved .mat
	img_list = sorted(set(img_list).difference(already))
	print('img_list len:', len(img_list))
	if not os.path.exists(os.path.join(save_dir,image_path)):
		os.makedirs(os.path.join(save_dir,image_path))
	for img in img_list:
		if not os.path.exists(os.path.join(save_dir,os.path.dirname(img))):
			os.makedirs(os.path.join(save_dir,os.path.dirname(img)))

	# read BFM face model
	# transfer original BFM model to our model
	if not os.path.isfile('./BFM/BFM_model_front.mat'):
		transferBFM09()

	# read face model
	facemodel = BFM()
	# read standard landmarks for preprocessing images
	lm3D = load_lm3d()
	n = 0
	t1 = time.time()

	# build reconstruction model
	#with tf.Graph().as_default() as graph,tf.device('/cpu:0'):
	with tf.Graph().as_default() as graph:

		images = tf.placeholder(name = 'input_imgs', shape = [None,224,224,3], dtype = tf.float32)
		graph_def = load_graph('network/FaceReconModel.pb')
		tf.import_graph_def(graph_def,name='resnet',input_map={'input_imgs:0': images})

		# output coefficients of R-Net (dim = 257) 
		coeff = graph.get_tensor_by_name('resnet/coeff:0')

		with tf.Session() as sess:
			print('reconstructing...')
			for file in img_list:
				n += 1
				# load images and corresponding 5 facial landmarks
				img,lm = load_img(file,file[:-4]+'.txt')
				# preprocess input image
				input_img,lm_new,transform_params = Preprocess(img,lm,lm3D)

				coef = sess.run(coeff,feed_dict = {images: input_img})

				# save output files
				savemat(os.path.join(save_dir,file[:-4]+'.mat'),{'coeff':coef,'lm_5p':lm_new})
	t2 = time.time()
	print('Total n:', n, 'Time:', t2-t1)
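The four stacked glob calls above scan up to four directory levels. An equivalent sketch using recursive globbing; note that '**' matches any depth, so this is a superset of the explicit four levels:

    import glob, os
    txts = glob.glob(os.path.join(image_path, '**', '*.txt'), recursive=True)
    img_list = [p[:-4] + '.jpg' for p in txts]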
def demo():
    # input and output folder
    image_path = fdata_dir + 'input'
    save_path = fdata_dir + 'output'
    img_list = glob.glob(image_path + '/' + '*.png')

    # read face model
    facemodel = BFM(fdata_dir + "BFM/mSEmTFK68etc.chj")
    is_cuda = True
    facemodel.to_torch(is_torch=True, is_cuda=is_cuda)
    # read standard landmarks for preprocessing images
    lm3D = facemodel.load_lm3d(fdata_dir + "BFM/similarity_Lm3D_all.mat")
    n = 0

    model = resnet50_use()
    model.load_state_dict(torch.load(fdata_dir +
                                     "network/th_model_params.pth"))
    model.eval()

    if is_cuda: model.cuda()

    for param in model.parameters():
        param.requires_grad = False

    print('reconstructing...')
    for file in img_list:
        n += 1
        print(n)
        # load images and corresponding 5 facial landmarks
        img, lm = load_img(file, file.replace('png', 'txt'))

        # preprocess input image
        input_img_org, lm_new, transform_params = Preprocess(img, lm, lm3D)

        input_img = input_img_org.astype(np.float32)
        input_img = torch.from_numpy(input_img).permute(0, 3, 1, 2)
        # the input_img is BGR

        if is_cuda: input_img = input_img.cuda()

        arr_coef = model(input_img)

        coef = torch.cat(arr_coef, 1)

        # reconstruct 3D face with output coefficients and face model
        face_shape, face_texture, face_color, tri, face_projection, z_buffer, landmarks_2d = Reconstruction(
            coef, facemodel)

        # optional debug: visualize the projected landmarks (disabled)
        if False:
            input_img_org = input_img_org.squeeze()
            landmarks_2d = landmarks_2d.squeeze()
            img = np.array(input_img_org).copy()
            landmarks_2d[:, 1] = 224 - landmarks_2d[:, 1]
            face2ds = landmarks_2d
            drawCirclev2(img, face2ds)

            key = showimg(img)
            if key == 27: break
            continue

        if is_cuda:
            face_shape = face_shape.cpu()
            face_texture = face_texture.cpu()
            face_color = face_color.cpu()
            face_projection = face_projection.cpu()
            z_buffer = z_buffer.cpu()
            landmarks_2d = landmarks_2d.cpu()

        # reshape outputs (use the numpy preprocessed image, not the CUDA tensor)
        input_img = np.squeeze(input_img_org)
        shape = np.squeeze(face_shape)
        color = np.squeeze(face_color)
        landmarks_2d = np.squeeze(landmarks_2d)

        # for visualization: shift/scale depth and append it as a z channel
        z_buffer -= z_buffer.min()
        z_buffer *= 100
        face3d_project = np.concatenate((face_projection, z_buffer), axis=2)

        # CHJ_INFO: change to show what you want
        shape = np.squeeze(face3d_project)

        # save output files
        # cropped image, which is the direct input to our R-Net
        # 257 dim output coefficients by R-Net
        # 68 face landmarks of cropped image
        #savemat(os.path.join(save_path,file.split('\\')[-1].replace('.png','.mat')),{'cropped_img':input_img[:,:,::-1],'coeff':coef,'landmarks_2d':landmarks_2d,'lm_5p':lm_new})
        save_obj(
            os.path.join(save_path,
                         file.split('\\')[-1].replace('.png', '_mesh-th.obj')),
            shape, tri + 1,
            np.clip(color, 0, 1))  # 3D reconstruction face (in canonical view)

        # CHJ_INFO: demo limit, stop after the first few images
        if n > 3:
            break
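drawCirclev2 and showimg above are project-local helpers whose definitions are not shown; minimal stand-in sketches under the obvious assumptions (draw green dots, display the image and return the pressed key):

    import cv2
    import numpy as np

    def drawCirclev2(img, pts):  # sketch of the assumed helper
        for x, y in np.asarray(pts).reshape(-1, 2):
            cv2.circle(img, (int(round(x)), int(round(y))), 2, (0, 255, 0), -1)

    def showimg(img):  # sketch: show and return the key code (27 = Esc)
        cv2.imshow('img', img.astype(np.uint8))
        return cv2.waitKey(0)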
Example #10
    def FaceReconst_Spec_pics(self):
        inDir = self.indir
        # Text_Forth_lineEdit.text().split(';;')
        save_path = self.savepath
        self.mtcnn_Spec_pic()
        # Text_Fifth_lineEdit.text()
        # self.NoteTipsEdit.append("\n------- reading face information -------")
        # detector = MTCNN()
        # cnt = 0
        # for img_path in inDir:
        #     image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        #     try:
        #         result = detector.detect_faces(image)
        #         # Result is an array with all the bounding boxes detected. We know that for 'ivan.jpg' there is only one.
        #
        #         bounding_box = result[0]['box']
        #         # print(result)
        #
        #         keypoints = result[0]['keypoints']
        #         land_mask_file = img_path.replace(".jpg", ".txt")
        #         file_write = open(land_mask_file, "w")
        #         x, y = keypoints['left_eye']
        #         file_write.write(f"{x} {y}\n")
        #         x, y = keypoints['right_eye']
        #         file_write.write(f"{x} {y}\n")
        #         x, y = keypoints['nose']
        #         file_write.write(f"{x} {y}\n")
        #         x, y = keypoints['mouth_left']
        #         file_write.write(f"{x} {y}\n")
        #         x, y = keypoints['mouth_right']
        #         file_write.write(f"{x} {y}\n")
        #         file_write.close()
        #     except:
        #         cnt += 1
        # self._trigger_text.emit("\n------- {} images failed landmark extraction -------".format(cnt))
        if not os.path.isfile('./BFM/BFM_model_front.mat'):
            transferBFM09()

        # read standard landmarks for preprocessing images
        lm3D = load_lm3d()
        batchsize = 1
        n = 0

        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
            FaceReconstructor = Face3D()
            images = tf.placeholder(name='input_imgs',
                                    shape=[batchsize, 224, 224, 3],
                                    dtype=tf.float32)
            graph_def = self.load_graph('network/FaceReconModel.pb')
            tf.import_graph_def(graph_def,
                                name='resnet',
                                input_map={'input_imgs:0': images})

            # output coefficients of R-Net (dim = 257)
            coeff = graph.get_tensor_by_name('resnet/coeff:0')

            # reconstructing faces
            FaceReconstructor.Reconstruction_Block(coeff, batchsize)
            face_shape = FaceReconstructor.face_shape_t
            face_texture = FaceReconstructor.face_texture
            face_color = FaceReconstructor.face_color
            landmarks_2d = FaceReconstructor.landmark_p
            # recon_img = FaceReconstructor.render_imgs
            tri = FaceReconstructor.facemodel.face_buf
            with tf.Session() as sess:
                # print('----------- reconstructing -----------')
                self._trigger_text.emit("\n------- reconstructing -------")
                QApplication.processEvents()
                cntt = 0

                for file in inDir:
                    try:
                        n += 1
                        print(n)
                        # load images and corresponding 5 facial landmarks
                        filetxt = os.path.basename(file)
                        img, lm = load_img(
                            file,
                            file.replace('png', 'txt').replace('jpg', 'txt'))
                        # preprocess input image
                        input_img, lm_new, transform_params = Preprocess(
                            img, lm, lm3D)


                        coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, tri_ = sess.run(
                            [
                                coeff, face_shape, face_texture, face_color,
                                landmarks_2d, tri
                            ],
                            feed_dict={images: input_img})

                        # reshape outputs
                        input_img = np.squeeze(input_img)
                        face_shape_ = np.squeeze(face_shape_, (0))
                        face_texture_ = np.squeeze(face_texture_, (0))
                        face_color_ = np.squeeze(face_color_, (0))
                        landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
                        # recon_img_ = np.squeeze(recon_img_, (0))

                        # save output files
                        # savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'recon_img':recon_img_,'coeff':coeff_,\
                        # 	'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
                        # savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'coeff':coeff_,\
                        # 	'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
                        save_obj(
                            os.path.join(
                                save_path,
                                filetxt.replace('.png', '_mesh.obj').replace(
                                    '.jpg', '_mesh.obj')), face_shape_, tri_,
                            np.clip(face_color_, 0, 255) /
                            255)  # 3D reconstruction face (in canonical view)
                    except Exception:
                        cntt += 1
                print(cntt)
                self._trigger_text.emit(
                    "\n------- {} images failed to reconstruct -------".format(cntt))
                self._trigger_text.emit("\n------- done -------")
                QApplication.processEvents()

        self._trigger.emit()
Example #11
    def FaceReconst(self):
        self.MtcnnDectect()

        inFileDir = self.indir
        save_path = self.savepath
        img_list = glob.glob(inFileDir + '/' + '*.png')
        img_list += glob.glob(inFileDir + '/' + '*.jpg')
        # read BFM face model
        # transfer original BFM model to our model
        if not os.path.isfile('./BFM/BFM_model_front.mat'):
            transferBFM09()

        # read standard landmarks for preprocessing images
        lm3D = load_lm3d()
        batchsize = 1
        n = 0

        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
            FaceReconstructor = Face3D()
            images = tf.placeholder(name='input_imgs',
                                    shape=[batchsize, 224, 224, 3],
                                    dtype=tf.float32)
            graph_def = self.load_graph('network/FaceReconModel.pb')
            tf.import_graph_def(graph_def,
                                name='resnet',
                                input_map={'input_imgs:0': images})

            # output coefficients of R-Net (dim = 257)
            coeff = graph.get_tensor_by_name('resnet/coeff:0')

            # reconstructing faces
            FaceReconstructor.Reconstruction_Block(coeff, batchsize)
            face_shape = FaceReconstructor.face_shape_t
            face_texture = FaceReconstructor.face_texture
            face_color = FaceReconstructor.face_color
            landmarks_2d = FaceReconstructor.landmark_p
            # recon_img = FaceReconstructor.render_imgs
            tri = FaceReconstructor.facemodel.face_buf
            with tf.Session() as sess:
                # print('----------- reconstructing -----------')
                self._trigger_pic_text.emit("\n------- reconstructing -------")
                QApplication.processEvents()
                cnt = 0
                for file in img_list:
                    try:
                        n += 1
                        print(n)
                        # load images and corresponding 5 facial landmarks
                        img, lm = load_img(
                            file,
                            file.replace('png', 'txt').replace('jpg', 'txt'))
                        # preprocess input image
                        input_img, lm_new, transform_params = Preprocess(
                            img, lm, lm3D)


                        coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, tri_ = sess.run(
                            [
                                coeff, face_shape, face_texture, face_color,
                                landmarks_2d, tri
                            ],
                            feed_dict={images: input_img})

                        # reshape outputs
                        input_img = np.squeeze(input_img)
                        face_shape_ = np.squeeze(face_shape_, (0))
                        face_texture_ = np.squeeze(face_texture_, (0))
                        face_color_ = np.squeeze(face_color_, (0))
                        landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
                        # recon_img_ = np.squeeze(recon_img_, (0))

                        # save output files
                        # savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'recon_img':recon_img_,'coeff':coeff_,\
                        # 	'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
                        # savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'coeff':coeff_,\
                        # 	'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
                        save_obj(
                            os.path.join(
                                save_path,
                                file.split(os.path.sep)[-1].replace(
                                    '.png',
                                    '_mesh.obj').replace('jpg', '_mesh.obj')),
                            face_shape_, tri_,
                            np.clip(face_color_, 0, 255) /
                            255)  # 3D reconstruction face (in canonical view)
                    except Exception:
                        cnt += 1
                self._trigger_pic_text.emit(
                    "\n------- {} images failed to reconstruct -------".format(cnt))
                self._trigger_pic_text.emit("\n------- done -------")
                QApplication.processEvents()

        self.trigger.emit()
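The two GUI methods above report progress through Qt signals (_trigger_text, _trigger_pic_text, _trigger, trigger) rather than touching widgets directly. A sketch of the assumed wiring; the class and slot names are placeholders:

    from PyQt5.QtCore import QObject, pyqtSignal

    class ReconWorker(QObject):  # hypothetical owner of the methods above
        _trigger_text = pyqtSignal(str)
        _trigger = pyqtSignal()

    # elsewhere in the GUI, e.g.:
    # worker._trigger_text.connect(note_tips_edit.append)
    # worker._trigger.connect(on_reconstruction_done)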