Example #1
0
def demo(args):
    """Reconstruct a 3D face from a single image and save all outputs.

    Pipeline: detect 5 facial landmarks with MTCNN, align/crop the face,
    regress 257 BFM coefficients with the frozen R-Net graph (GPU),
    render the reconstructed face (CPU), paste the rendering back into
    the original photo, and export a .mat bundle (non-Windows only) plus
    an .obj mesh of the face in canonical view.

    Args:
        args: parsed CLI namespace; uses args.input (path to the image)
            and args.output (output directory, created if missing).
    """
    # input and output folder
    image_path = args.input
    save_path = args.output
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # read BFM face model
    facemodel = BFM()
    # transfer original BFM model to our model (one-time conversion)
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # read standard landmarks for preprocessing images
    lm3D = load_lm3d()
    batchsize = 1

    # build reconstruction model
    with tf.Graph().as_default() as graph, tf.device('/gpu:0'):

        FaceReconstructor = Face3D()
        images = tf.placeholder(name='input_imgs',
                                shape=[batchsize, 224, 224, 3],
                                dtype=tf.float32)
        graph_def = load_graph('network/FaceReconModel.pb')
        tf.import_graph_def(graph_def,
                            name='resnet',
                            input_map={'input_imgs:0': images})

        # output coefficients of R-Net (dim = 257)
        coeff = graph.get_tensor_by_name('resnet/coeff:0')

        with tf.device('/cpu:0'):
            # renderer layer (35709 vertices per BFM face)
            faceshaper = tf.placeholder(name="face_shape_r",
                                        shape=[1, 35709, 3],
                                        dtype=tf.float32)
            facenormr = tf.placeholder(name="face_norm_r",
                                       shape=[1, 35709, 3],
                                       dtype=tf.float32)
            facecolor = tf.placeholder(name="face_color",
                                       shape=[1, 35709, 3],
                                       dtype=tf.float32)
            rendered = Render_layer(faceshaper, facenormr, facecolor,
                                    facemodel, 1)

            rstimg = tf.placeholder(name='rstimg',
                                    shape=[224, 224, 4],
                                    dtype=tf.uint8)
            encode_png = tf.image.encode_png(rstimg)

        # reconstructing faces
        FaceReconstructor.Reconstruction_Block(coeff, batchsize)
        face_shape = FaceReconstructor.face_shape_t
        face_texture = FaceReconstructor.face_texture
        face_color = FaceReconstructor.face_color
        landmarks_2d = FaceReconstructor.landmark_p
        recon_img = FaceReconstructor.render_imgs
        tri = FaceReconstructor.facemodel.face_buf

        # MTCNN Detector
        detector = MTCNN()
        img, lm = load_img_and_lm(image_path, detector)

        with tf.Session() as sess:
            print('reconstructing...')
            # preprocess input image; `posion` appears to hold the crop
            # box (left, right, up, below) plus a scale factor — used
            # below to paste the rendering back (TODO confirm layout).
            input_img, lm_new, transform_params, posion = Preprocess(
                img, lm, lm3D)

            coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, recon_img_, tri_ = sess.run(
                [
                    coeff, face_shape, face_texture, face_color,
                    landmarks_2d, recon_img, tri
                ],
                feed_dict={images: input_img})

            # renderer output (renamed to *_r so the graph tensors
            # `face_color` and `tri` above are not shadowed)
            face_shape_r, face_norm_r, face_color_r, tri_r = Reconstruction_for_render(
                coeff_, facemodel)
            final_images = sess.run(rendered,
                                    feed_dict={
                                        faceshaper:
                                        face_shape_r.astype('float32'),
                                        facenormr:
                                        face_norm_r.astype('float32'),
                                        facecolor:
                                        face_color_r.astype('float32')
                                    })
            result_image = final_images[0, :, :, :]
            result_image = np.clip(result_image, 0., 1.).copy(order='C')
            # save renderer output
            result_bytes = sess.run(encode_png, {rstimg: result_image * 255.0})
            # FIX: was .replace('jpg', '_render.png') without the dot,
            # which produced names like "photo._render.png".
            result_output_path = os.path.join(
                save_path,
                image_path.split(os.path.sep)[-1].replace(
                    '.png', '_render.png').replace('.jpg', '_render.png'))
            with open(result_output_path, 'wb') as output_file:
                output_file.write(result_bytes)

            # get RGB image from RGBA
            rgb_renderer_img, mask = RGBA2RGB(result_image)
            # Paste the 3D rendered image back to the original image:
            # scale the crop box back to original-image coordinates.
            renderer_3D_input_img = np.copy(img)
            left0 = int(posion[0] * posion[4])
            right0 = int(posion[1] * posion[4])
            up0 = int(posion[2] * posion[4])
            below0 = int(posion[3] * posion[4])
            rgb_renderer_img = cv2.resize(rgb_renderer_img,
                                          (right0 - left0, below0 - up0))
            mask = cv2.resize(mask, (right0 - left0, below0 - up0))
            mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
            # NOTE(review): the original called `mask.astype('uint32')`
            # here and discarded the result (astype is not in-place), so
            # it was a no-op; removed rather than assigned, since
            # assigning it would change the blending arithmetic below.

            # Clip the paste region (and the rendering/mask) to the
            # bounds of the original image.
            if left0 < 0:
                mask = mask[:, -left0:]
                rgb_renderer_img = rgb_renderer_img[:, -left0:]
                left0 = 0
            if up0 < 0:
                mask = mask[-up0:, :]
                rgb_renderer_img = rgb_renderer_img[-up0:, :]
                up0 = 0
            if right0 > renderer_3D_input_img.shape[1]:
                mask = mask[:, :-(right0 - renderer_3D_input_img.shape[1])]
                rgb_renderer_img = rgb_renderer_img[:, :-(
                    right0 - renderer_3D_input_img.shape[1])]
                right0 = renderer_3D_input_img.shape[1]
            if below0 > renderer_3D_input_img.shape[0]:
                mask = mask[:-(below0 - renderer_3D_input_img.shape[0]), :]
                rgb_renderer_img = rgb_renderer_img[:-(
                    below0 - renderer_3D_input_img.shape[0]), :]
                below0 = renderer_3D_input_img.shape[0]

            # Blend: keep the background where mask is 1, add the render.
            renderer_3D_input_img[
                up0:below0, left0:right0] = renderer_3D_input_img[
                    up0:below0, left0:right0] * mask + rgb_renderer_img
            renderer_3D_input_img = cv2.cvtColor(renderer_3D_input_img,
                                                 cv2.COLOR_BGR2RGB)
            # FIX: same missing-dot .replace('jpg', ...) as above.
            cv2.imwrite(
                os.path.join(
                    save_path,
                    image_path.split(os.path.sep)[-1].replace(
                        '.png', '_renderer_in_original.png').replace(
                            '.jpg', '_renderer_in_original.png')),
                renderer_3D_input_img)

            # reshape outputs: drop the leading batch dimension
            input_img = np.squeeze(input_img)
            face_shape_ = np.squeeze(face_shape_, (0))
            face_texture_ = np.squeeze(face_texture_, (0))
            face_color_ = np.squeeze(face_color_, (0))
            landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
            if not is_windows:
                recon_img_ = np.squeeze(recon_img_, (0))

            # save output files (.mat only where the renderer ran)
            if not is_windows:
                savemat(
                    os.path.join(
                        save_path,
                        image_path.split(os.path.sep)[-1].replace(
                            '.png', '.mat').replace('.jpg', '.mat')),
                    {
                        'cropped_img': input_img[:, :, ::-1],
                        'recon_img': recon_img_,
                        'coeff': coeff_,
                        'face_shape': face_shape_,
                        'face_texture': face_texture_,
                        'face_color': face_color_,
                        'lm_68p': landmarks_2d_,
                        'lm_5p': lm_new
                    })
            # FIX: was .replace('jpg', '_mesh.obj') without the dot.
            save_obj(
                os.path.join(
                    save_path,
                    image_path.split(os.path.sep)[-1].replace(
                        '.png', '_mesh.obj').replace('.jpg', '_mesh.obj')),
                face_shape_, tri_,
                np.clip(face_color_, 0, 255) /
                255)  # 3D reconstruction face (in canonical view)
Example #2
0
def demo():
    """Batch-reconstruct 3D faces for every .png/.jpg image in ./input.

    For each image, regresses 257 BFM coefficients with R-Net (either the
    frozen .pb graph or a .ckpt checkpoint, per CLI flags), rebuilds the
    face, and writes a .mat bundle (non-Windows only) and an .obj mesh
    into ./output.
    """
    args = parse_args()

    # Fixed input/output locations; create the output folder on demand.
    image_path = 'input'
    save_path = 'output'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    img_list = glob.glob(image_path + '/' + '*.png')
    img_list += glob.glob(image_path + '/' + '*.jpg')

    # One-time conversion of the original BFM model to our .mat format.
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # Canonical 3D landmarks used to align the input faces.
    lm3D = load_lm3d()
    n = 0

    # Build the reconstruction graph.
    with tf.Graph().as_default() as graph:

        with tf.device('/cpu:0'):
            opt = Option(is_train=False)
        opt.batch_size = 1
        opt.pretrain_weights = args.pretrain_weights
        FaceReconstructor = Face3D()
        images = tf.placeholder(name='input_imgs',
                                shape=[opt.batch_size, 224, 224, 3],
                                dtype=tf.float32)

        if args.use_pb and os.path.isfile('network/FaceReconModel.pb'):
            print('Using pre-trained .pb file.')
            graph_def = load_graph('network/FaceReconModel.pb')
            tf.import_graph_def(graph_def,
                                name='resnet',
                                input_map={'input_imgs:0': images})
            # R-Net regresses a 257-dim coefficient vector per face.
            coeff = graph.get_tensor_by_name('resnet/coeff:0')
        else:
            print('Using pre-trained .ckpt file: %s' % opt.pretrain_weights)
            import networks
            coeff = networks.R_Net(images, is_training=False)

        # Decode coefficients into geometry, texture, landmarks, renders.
        FaceReconstructor.Reconstruction_Block(coeff, opt)
        face_shape = FaceReconstructor.face_shape_t
        face_texture = FaceReconstructor.face_texture
        face_color = FaceReconstructor.face_color
        landmarks_2d = FaceReconstructor.landmark_p
        recon_img = FaceReconstructor.render_imgs
        tri = FaceReconstructor.facemodel.face_buf

        with tf.Session() as sess:
            if not args.use_pb:
                restore_weights(sess, opt)

            print('reconstructing...')
            for file in img_list:
                n += 1
                print(n)
                # The 5 facial landmarks live in a sibling .txt file.
                img, lm = load_img(
                    file, file.replace('png', 'txt').replace('jpg', 'txt'))
                # Align/crop the face to the 224x224 network input.
                input_img, lm_new, transform_params = align_img(img, lm, lm3D)

                fetches = [
                    coeff, face_shape, face_texture, face_color,
                    landmarks_2d, recon_img, tri
                ]
                (coeff_, face_shape_, face_texture_, face_color_,
                 landmarks_2d_, recon_img_, tri_) = sess.run(
                     fetches, feed_dict={images: input_img})

                # Drop the leading batch dimension from every output.
                input_img = np.squeeze(input_img)
                face_shape_ = np.squeeze(face_shape_, (0))
                face_texture_ = np.squeeze(face_texture_, (0))
                face_color_ = np.squeeze(face_color_, (0))
                landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
                if not is_windows:
                    recon_img_ = np.squeeze(recon_img_, (0))

                # Persist outputs (.mat only where the renderer ran).
                if not is_windows:
                    savemat(
                        os.path.join(
                            save_path,
                            file.split(os.path.sep)[-1].replace(
                                '.png', '.mat').replace('jpg', 'mat')),
                        {
                            'cropped_img': input_img[:, :, ::-1],
                            'recon_img': recon_img_,
                            'coeff': coeff_,
                            'face_shape': face_shape_,
                            'face_texture': face_texture_,
                            'face_color': face_color_,
                            'lm_68p': landmarks_2d_,
                            'lm_5p': lm_new
                        })
                # 3D reconstruction face (in canonical view)
                save_obj(
                    os.path.join(
                        save_path,
                        file.split(os.path.sep)[-1].replace(
                            '.png', '_mesh.obj').replace('.jpg',
                                                         '_mesh.obj')),
                    face_shape_, tri_, np.clip(face_color_, 0, 255) / 255)
Example #3
0
    def FaceReconst_Spec_pics(self):
        """Run 3D face reconstruction on the explicitly selected images.

        Detects 5-point landmarks via self.mtcnn_Spec_pic() (which writes
        sibling .txt files), then regresses BFM coefficients with the
        frozen R-Net graph on CPU and writes one *_mesh.obj per input
        image into self.savepath.  Progress and the failure count are
        reported through the self._trigger_text Qt signal; self._trigger
        is emitted once the whole batch is done.
        """
        inDir = self.indir
        save_path = self.savepath
        # Landmark detection writes the per-image .txt files read below.
        self.mtcnn_Spec_pic()

        # One-time conversion of the original BFM model to our .mat format.
        if not os.path.isfile('./BFM/BFM_model_front.mat'):
            transferBFM09()

        # read standard landmarks for preprocessing images
        lm3D = load_lm3d()
        batchsize = 1
        n = 0

        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
            FaceReconstructor = Face3D()
            images = tf.placeholder(name='input_imgs',
                                    shape=[batchsize, 224, 224, 3],
                                    dtype=tf.float32)
            graph_def = self.load_graph('network/FaceReconModel.pb')
            tf.import_graph_def(graph_def,
                                name='resnet',
                                input_map={'input_imgs:0': images})

            # output coefficients of R-Net (dim = 257)
            coeff = graph.get_tensor_by_name('resnet/coeff:0')

            # reconstructing faces (no render ops: .obj export only)
            FaceReconstructor.Reconstruction_Block(coeff, batchsize)
            face_shape = FaceReconstructor.face_shape_t
            face_texture = FaceReconstructor.face_texture
            face_color = FaceReconstructor.face_color
            landmarks_2d = FaceReconstructor.landmark_p
            tri = FaceReconstructor.facemodel.face_buf
            with tf.Session() as sess:
                self._trigger_text.emit("\n-------正在重建-------")
                QApplication.processEvents()
                cntt = 0

                for file in inDir:
                    try:
                        n += 1
                        print(n)
                        # Basename is reused to build the output path.
                        filetxt = os.path.basename(file)
                        # The 5 facial landmarks live in a sibling .txt file.
                        img, lm = load_img(
                            file,
                            file.replace('png', 'txt').replace('jpg', 'txt'))
                        # Align/crop the face to the 224x224 network input.
                        input_img, lm_new, transform_params = Preprocess(
                            img, lm, lm3D)

                        coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, tri_ = sess.run(
                            [
                                coeff, face_shape, face_texture, face_color,
                                landmarks_2d, tri
                            ],
                            feed_dict={images: input_img})

                        # Drop the leading batch dimension from each output.
                        input_img = np.squeeze(input_img)
                        face_shape_ = np.squeeze(face_shape_, (0))
                        face_texture_ = np.squeeze(face_texture_, (0))
                        face_color_ = np.squeeze(face_color_, (0))
                        landmarks_2d_ = np.squeeze(landmarks_2d_, (0))

                        # 3D reconstruction face (in canonical view)
                        save_obj(
                            os.path.join(
                                save_path,
                                filetxt.replace('.png', '_mesh.obj').replace(
                                    '.jpg', '_mesh.obj')), face_shape_, tri_,
                            np.clip(face_color_, 0, 255) / 255)
                    except Exception:
                        # Best-effort batch: count the failure and continue.
                        # (Was a bare `except:`, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        cntt += 1
                print(cntt)
                self._trigger_text.emit(
                    "\n-------有{}张重建失败-------".format(cntt))
                self._trigger_text.emit("\n-------完毕-------")
                QApplication.processEvents()

        self._trigger.emit()
Example #4
0
def d3dr(file):
    """Reconstruct a 3D face from a single image file.

    Reads the image plus its sibling .txt landmark file, regresses 257
    BFM coefficients with the frozen R-Net graph on CPU, and saves a
    .mat coefficient bundle and an .obj mesh (canonical view) whose
    paths are derived from the input via change_file_extension().
    """
    # One-time conversion of the original BFM model to our .mat format.
    if not os.path.isfile('./BFM/BFM_model_front.mat'):
        transferBFM09()

    # Canonical 3D landmarks used to align the face crop.
    lm3D = load_lm3d()
    batchsize = 1
    n = 0

    # Build the reconstruction graph on CPU.
    with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
        FaceReconstructor = Face3D()
        images = tf.placeholder(name='input_imgs',
                                shape=[batchsize, 224, 224, 3],
                                dtype=tf.float32)
        tf.import_graph_def(load_graph('network/FaceReconModel.pb'),
                            name='resnet',
                            input_map={'input_imgs:0': images})

        # R-Net regresses a 257-dim coefficient vector per face.
        coeff = graph.get_tensor_by_name('resnet/coeff:0')

        # Decode coefficients into geometry, texture, landmarks, renders.
        FaceReconstructor.Reconstruction_Block(coeff, batchsize)
        fetches = [
            coeff,
            FaceReconstructor.face_shape_t,
            FaceReconstructor.face_texture,
            FaceReconstructor.face_color,
            FaceReconstructor.landmark_p,
            FaceReconstructor.render_imgs,
            FaceReconstructor.facemodel.face_buf,
        ]

        with tf.Session() as sess:
            print('reconstructing...')
            n += 1
            print(n)
            # The 5 facial landmarks live in a sibling .txt file.
            img, lm = load_img(
                file, file.replace('png', 'txt').replace('jpg', 'txt'))
            # Align/crop the face to the 224x224 network input.
            input_img, lm_new, transform_params = Preprocess(img, lm, lm3D)

            (coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_,
             recon_img_, tri_) = sess.run(fetches,
                                          feed_dict={images: input_img})

            # Drop the leading batch dimension from every output.
            input_img = np.squeeze(input_img)
            face_shape_ = np.squeeze(face_shape_, (0))
            face_texture_ = np.squeeze(face_texture_, (0))
            face_color_ = np.squeeze(face_color_, (0))
            landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
            recon_img_ = np.squeeze(recon_img_, (0))

            # Persist the full coefficient/geometry bundle ...
            savemat(
                change_file_extension(file, "mat"), {
                    'cropped_img': input_img[:, :, ::-1],
                    'recon_img': recon_img_,
                    'coeff': coeff_,
                    'face_shape': face_shape_,
                    'face_texture': face_texture_,
                    'face_color': face_color_,
                    'lm_68p': landmarks_2d_,
                    'lm_5p': lm_new
                })
            # ... and the mesh.  3D reconstruction face (canonical view).
            save_obj(change_file_extension(file, "obj"), face_shape_, tri_,
                     np.clip(face_color_, 0, 255) / 255)
Example #5
0
    def FaceReconst(self):
        """Run 3D face reconstruction over every .png/.jpg in self.indir.

        Runs MTCNN landmark detection first (self.MtcnnDectect writes the
        per-image .txt landmark files), then regresses BFM coefficients
        with the frozen R-Net graph on CPU and writes one *_mesh.obj per
        image into self.savepath.  Progress and the failure count go out
        through the self._trigger_pic_text Qt signal; self.trigger fires
        when the batch is done.
        """
        # Landmark detection writes the per-image .txt files read below.
        self.MtcnnDectect()

        inFileDir = self.indir
        save_path = self.savepath
        img_list = glob.glob(inFileDir + '/' + '*.png')
        img_list += glob.glob(inFileDir + '/' + '*.jpg')
        # One-time conversion of the original BFM model to our .mat format.
        if not os.path.isfile('./BFM/BFM_model_front.mat'):
            transferBFM09()

        # read standard landmarks for preprocessing images
        lm3D = load_lm3d()
        batchsize = 1
        n = 0

        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
            FaceReconstructor = Face3D()
            images = tf.placeholder(name='input_imgs',
                                    shape=[batchsize, 224, 224, 3],
                                    dtype=tf.float32)
            graph_def = self.load_graph('network/FaceReconModel.pb')
            tf.import_graph_def(graph_def,
                                name='resnet',
                                input_map={'input_imgs:0': images})

            # output coefficients of R-Net (dim = 257)
            coeff = graph.get_tensor_by_name('resnet/coeff:0')

            # reconstructing faces (no render ops: .obj export only)
            FaceReconstructor.Reconstruction_Block(coeff, batchsize)
            face_shape = FaceReconstructor.face_shape_t
            face_texture = FaceReconstructor.face_texture
            face_color = FaceReconstructor.face_color
            landmarks_2d = FaceReconstructor.landmark_p
            tri = FaceReconstructor.facemodel.face_buf
            with tf.Session() as sess:
                self._trigger_pic_text.emit("\n-------正在重建-------")
                QApplication.processEvents()
                cnt = 0
                for file in img_list:
                    try:
                        n += 1
                        print(n)
                        # The 5 facial landmarks live in a sibling .txt file.
                        img, lm = load_img(
                            file,
                            file.replace('png', 'txt').replace('jpg', 'txt'))
                        # Align/crop the face to the 224x224 network input.
                        input_img, lm_new, transform_params = Preprocess(
                            img, lm, lm3D)

                        coeff_, face_shape_, face_texture_, face_color_, landmarks_2d_, tri_ = sess.run(
                            [
                                coeff, face_shape, face_texture, face_color,
                                landmarks_2d, tri
                            ],
                            feed_dict={images: input_img})

                        # Drop the leading batch dimension from each output.
                        input_img = np.squeeze(input_img)
                        face_shape_ = np.squeeze(face_shape_, (0))
                        face_texture_ = np.squeeze(face_texture_, (0))
                        face_color_ = np.squeeze(face_color_, (0))
                        landmarks_2d_ = np.squeeze(landmarks_2d_, (0))

                        # 3D reconstruction face (in canonical view).
                        # FIX: was .replace('jpg', '_mesh.obj') without the
                        # dot, producing names like "photo._mesh.obj".
                        save_obj(
                            os.path.join(
                                save_path,
                                file.split(os.path.sep)[-1].replace(
                                    '.png',
                                    '_mesh.obj').replace('.jpg',
                                                         '_mesh.obj')),
                            face_shape_, tri_,
                            np.clip(face_color_, 0, 255) / 255)
                    except Exception:
                        # Best-effort batch: count the failure and continue.
                        # (Was a bare `except:`, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        cnt += 1
                self._trigger_pic_text.emit(
                    "\n-------有{}张重建失败-------".format(cnt))
                self._trigger_pic_text.emit("\n-------完毕-------")
                QApplication.processEvents()

        self.trigger.emit()