Example #1
def run():

	###################################################
	##### Prepare images ##############################
	countIms = 0
	with open(fileList, "r") as ins, open(data_out + "/imList.txt","w") as outs:
		for image_path in ins:
			image_path = image_path.strip()  # drop the trailing newline from the list file
			print("> Prepare image "+image_path + ":")
			imname = ntpath.basename(image_path)
			imname = os.path.splitext(imname)[0]  # drop the file extension
			img = cv2.imread(image_path)
			## If we have input landmarks
			if len(landmarkDir) > 0:
				lms = np.loadtxt(landmarkDir + '/' + imname + '.pts')
				img2 = img.copy()  # working copy for drawing the input landmarks
				nLM = lms.shape[0]
				for i in range(0,nLM):
					cv2.circle(img2, (int(lms[i,0]), int(lms[i,1])), 5, (255,0,0))
				img, lms = utils.cropByInputLM(img, lms, img2)
			else:
				dlib_img = io.imread(image_path)
				img2 = img.copy()  # working copy for drawing the detected landmarks
				dets = detector(img, 1)
				print(">     Number of faces detected: {}".format(len(dets)))
				if len(dets) == 0:
					print('> Could not detect a face, skipping image: ' + image_path)
					continue
				if len(dets) > 1:
					print("> Processing only the first detected face!")
				detected_face = dets[0]
				## If we are using landmarks to crop
				shape = predictor(dlib_img, detected_face)
				nLM = shape.num_parts
				for i in range(0,nLM):
					cv2.circle(img2, (shape.part(i).x, shape.part(i).y), 5, (255,0,0))
				img, lms = utils.cropByLM(img, shape, img2)
			cv2.imwrite(data_out + "/imgs/"+imname+"_detect.png",img2)

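			# Rescale the landmarks to the 500x500 output resolution before saving them and the resized crop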
			lms = lms * 500.0/img.shape[0]
			with open(data_out + "/imgs/" + imname + ".pts", "w") as fileout:
				for i in range(0, lms.shape[0]):
					fileout.write("%f %f\n" % (lms[i,0], lms[i,1]))
			img = cv2.resize(img,(500, 500))
			cv2.imwrite(data_out + "/imgs/"+imname+ ".png",img)
			outs.write("%s\n" % (data_out + "/imgs/"+imname+ ".png"))
			countIms = countIms + 1

	###################################################
	##### Shape fitting ############################## 
	# load net
	MainModel = imp.load_source('MainModel', "../CNN/shape_model.py")
	net = torch.load(model_path)
	net.eval()

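	# Mean image (npz archive) subtracted from each input before the forward pass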
	mean0 = np.load(mean_path, encoding='latin1')
	mean = mean0['arr_0']
	net.cuda()

	print('> CNN Model loaded to regress 3D Shape and Texture!')
	model = scipy.io.loadmat(BFM_path,squeeze_me=True,struct_as_record=False)
	model = model["BFM"]
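	# BFM triangle indices are 1-based (MATLAB); shift to 0-based for Python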
	faces = model.faces-1
	print('> Loaded the Basel Face Model to write the 3D output!')
	## For loop over the input images
	count = 0
	with open(data_out + "/imList.txt", "r") as ins:
		for image_path in ins:
			image_path = image_path.strip()
			if len(image_path) < 3:
				continue
			count = count + 1
			fig_name = ntpath.basename(image_path)
			outFile = data_out + "/shape/" + fig_name[:-4]
			print('> Processing image: ' + image_path)
			im = cv2.imread(image_path)
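			# Resize to the 224x224 network input, reorder HWC -> CHW, and subtract the mean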
			im = cv2.resize(im, (224, 224)).astype(float).transpose((2,0,1))
			im = im - mean
			#im = im/255
			im = Variable(torch.from_numpy(im).unsqueeze(0).float().cuda())
			features = net(im).data.cpu().numpy()
			## Writing the regressed 3DMM parameters
			np.savetxt(outFile + '.ply.alpha', features[0,0:99])
			S,T = utils.projectBackBFM(model,features[0,:])
			print('> Writing 3D file in: ', outFile + '.ply')
			utils.write_ply(outFile + '.ply', S, T, faces)

	##################################################
	##### Bump map regression ########################
	print("Regress bump maps")
	bumpMapRegressor.estimateBump(bumpModel_path, data_out + "/imList.txt", data_out + "/bump/")
	##################################################
	##### Recover the 3D models ##################
	print("Recover the 3D models")
	cmd = ("./TestBump -batch " + data_out + "/imList.txt " + data_out + "/3D/ "
		+ data_out + "/shape " + data_out + "/bump " + data_out + "/bump "
		+ "../3DMM_model/BaselFaceModel_mod.h5 "
		+ "../dlib_model/shape_predictor_68_face_landmarks.dat "
		+ data_out + "/imgs " + data_out + "/imgs/ 1")
	print(cmd)
	os.system(cmd)
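
# run() relies on module-level configuration and models that are defined
# elsewhere in the script. A minimal sketch of those globals, with hypothetical
# paths and values, followed by the actual call, might look like:
import imp
import ntpath
import os
import cv2
import dlib
import numpy as np
import scipy.io
import torch
from torch.autograd import Variable
from skimage import io
import utils
import bumpMapRegressor

fileList       = "input/imList.txt"                      # one image path per line (hypothetical)
data_out       = "output"                                # root output directory (hypothetical)
landmarkDir    = ""                                      # optional dir of .pts files; empty -> run dlib detection
model_path     = "../CNN/shape_model.pth"                # hypothetical path
mean_path      = "../CNN/shape_mean.npz"                 # hypothetical path
BFM_path       = "../3DMM_model/BaselFaceModel_mod.mat"  # hypothetical path
bumpModel_path = "../CNN/bump_model.pth"                 # hypothetical path
detector  = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("../dlib_model/shape_predictor_68_face_landmarks.dat")

for sub in ("imgs", "shape", "bump", "3D"):              # output folders written by run()
	os.makedirs(os.path.join(data_out, sub), exist_ok=True)

run()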
Example #2
## For loop over the input images
count = 0
listImgs = glob("tmp_ims/*.png")
for image_path in listImgs:
	start_each_image = time.time()
	count = count + 1
	fig_name = ntpath.basename(image_path)
	outFile = data_out + "/" + fig_name[:-4]
	print('> Processing image: %s  %s  %d/%d' % (image_path, fig_name, count, len(listImgs)))
	net.blobs['data'].reshape(1,3,trg_size,trg_size)
	im = caffe.io.load_image(image_path)
	## Transforming the image into the right format
	net.blobs['data'].data[...] = transformer.preprocess('data', im)
	## Forward pass into the CNN
	net_output = net.forward()
	## Getting the output
	features = np.hstack( [net.blobs[layer_name].data[0].flatten()] )
	## Writing the regressed 3DMM parameters
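	## The 198-D output splits into 99 shape (alpha) and 99 texture (beta) coefficients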
	np.savetxt(outFile + '.ply.alpha', features[0:99])
	np.savetxt(outFile + '.ply.beta', features[99:198])
	#################################
	## Mapping back the regressed 3DMM into the original
	## Basel Face Model (Shape)
	##################################
	S,T = utils.projectBackBFM(model,features)
	print('> Writing 3D file in: %s (took %.2fs)' % (outFile + '.ply', time.time() - start_each_image))
	utils.write_ply(outFile + '.ply', S, T, faces)

end_time = time.time()
print("Cost time: {0:.2f}s".format(end_time - start_time))
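
# The loop above assumes a Caffe network, input transformer, and the Basel Face
# Model were loaded earlier in the script (much as in Example #3 below). A
# minimal sketch of that setup, with hypothetical paths and names, might be:
import time
import ntpath
from glob import glob
import numpy as np
import scipy.io
import caffe
import utils

trg_size   = 224               # network input size (assumed)
layer_name = 'fc_ftnew'        # hypothetical name of the CNN output layer
data_out   = 'output'          # hypothetical output directory
start_time = time.time()

net = caffe.Classifier('deploy.prototxt', '3dmm_cnn.caffemodel')  # hypothetical files
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
# (mean subtraction omitted here; see the transformer setup in Example #3)

model = scipy.io.loadmat('../3DMM_model/BaselFaceModel_mod.mat', squeeze_me=True, struct_as_record=False)["BFM"]
faces = model.faces - 1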
Example #3
def cnn_3dmm( img, outputPath ):
    """
    Regress 3DMM shape and texture parameters with the 3DMM-CNN and write the
    resulting .ply mesh and coefficients to outputPath.
    """
    # load net
    try:
        caffe.set_mode_gpu()
        caffe.set_device(GPU_ID)
    except Exception as ex:
        print('> Could not set up Caffe on GPU ' + str(GPU_ID) + ' - Error: ' + str(ex))
        print('> Reverting to CPU mode')
        caffe.set_mode_cpu()

    ## Opening mean average image
    with open(FLAGS.mean_path, "rb") as f:
        proto_data = f.read()
    a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
    mean  = caffe.io.blobproto_to_array(a)[0]
    ## Loading the CNN
    net = caffe.Classifier(FLAGS.deploy_path, FLAGS.model_path)

    ## Setting up the right transformer for an input image
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_channel_swap('data', (2,1,0))
    transformer.set_raw_scale('data', 255.0)
    transformer.set_mean('data',mean)
    print('> CNN Model loaded to regress 3D Shape and Texture!')
    ## Loading the Basel Face Model to write the 3D output
    model = scipy.io.loadmat(FLAGS.BFM_path,squeeze_me=True,struct_as_record=False)
    model = model["BFM"]
    faces = model.faces-1
    print('> Loaded the Basel Face Model to write the 3D output!')

    net.blobs['data'].reshape(1,3,trg_size,trg_size)
    # Recover the base name of the input image (without extension)
    imname = ntpath.basename(FLAGS.imagePath)
    imname = os.path.splitext(imname)[0]

    im = caffe.io.load_image(os.path.join(FLAGS.tmp_ims, imname + '.png'))

    ## Transforming the image into the right format
    net.blobs['data'].data[...] = transformer.preprocess('data', im)
    ## Forward pass into the CNN
    net_output = net.forward()
    ## Getting the output
    features = np.hstack( [net.blobs[layer_name].data[0].flatten()] )

    ## Writing the regressed 3DMM parameters
    np.savetxt(os.path.join( outputPath, imname+ '.ply.alpha'), features[0:99])
    np.savetxt(os.path.join( outputPath, imname + '.ply.beta'), features[99:198])

    #################################
    ## Mapping back the regressed 3DMM into the original
    ## Basel Face Model (Shape)
    ##################################
    S,T = utils.projectBackBFM(model, features)
    print('> Writing 3D file in: ' + os.path.join(outputPath, imname + '.ply'))
    utils.write_ply(os.path.join( outputPath, imname+ '.ply'), S, T, faces)
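
# A minimal, hypothetical driver for cnn_3dmm(). The function loads the
# preprocessed crop from FLAGS.tmp_ims (derived from FLAGS.imagePath), so the
# img argument is not used in the body shown; FLAGS.output_dir is an assumed flag.
img = cv2.imread(FLAGS.imagePath)
cnn_3dmm(img, FLAGS.output_dir)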