Code example #1
0
	def reProjectResidualwithX(nx, *args):
		"""Reprojection RMS residual for an optimisation vector that holds the
		stacked homogeneous structure points (pose is held fixed).

		nx   -- flat parameter vector; the first shp[0]*shp[1] entries are the
		        stacked structure coordinates
		args -- (shp, Xp_1, Xp_2, k, R, t): structure shape, observed image
		        points of both views, camera intrinsics, and the fixed pose
		        (R as returned by cv2.Rodrigues, i.e. R[0] is the matrix)
		Returns 2 * RMS over all stacked reprojection-error components.
		"""
		shp, obs_pts_1, obs_pts_2, intrinsics, rot, trans = args[:6]

		# Rebuild the homogeneous structure from the flat parameter vector.
		n_coords = shp[0] * shp[1]
		structure_4d = nx[0:n_coords].reshape(shp)

		# Camera 1 sits at the origin; camera 2 carries the fixed pose.
		cam_1 = Camera.myCamera(intrinsics)
		cam_2 = Camera.myCamera(intrinsics)
		cam_1.projectiveMatrix(np.mat([0,0,0]).transpose(), [0, 0, 0])
		cam_2.projectiveMatrix(np.mat(trans), rot[0])

		# Residuals between observed and reprojected points for both views.
		err_1 = obs_pts_1 - np.mat(cam_1.project(structure_4d))
		err_2 = obs_pts_2 - np.mat(cam_2.project(structure_4d))
		stacked = np.hstack((err_1, err_2)).reshape(-1)

		# 2 * root-mean-square of the stacked residual vector.
		return 2 * np.sqrt(np.sum(np.power(stacked, 2)) / len(stacked))
Code example #2
0
	def reProjectResidual(x, *args):
		"""Reprojection RMS residual for a pose-only parameter vector.

		x    -- 6-vector: Rodrigues rotation in x[0:3], translation in x[3:6]
		args -- (Str_4D, Xp_1, Xp_2, k): homogeneous structure points,
		        observed image points of both views, camera intrinsics
		Returns 2 * RMS over all stacked reprojection-error components.
		"""
		structure_4d, obs_pts_1, obs_pts_2, intrinsics = args[:4]

		# Decode the pose: cv2.Rodrigues returns (rotation matrix, jacobian).
		rot = cv2.Rodrigues(x[0:3])
		trans = np.array(x[3:6]).reshape(3,1)

		# Camera 1 sits at the origin; camera 2 carries the decoded pose.
		cam_1 = Camera.myCamera(intrinsics)
		cam_2 = Camera.myCamera(intrinsics)
		cam_1.projectiveMatrix(np.mat([0,0,0]).transpose(), [0, 0, 0])
		cam_2.projectiveMatrix(np.mat(trans), rot[0])

		# Residuals between observed and reprojected points for both views.
		err_1 = obs_pts_1 - np.mat(cam_1.project(structure_4d))
		err_2 = obs_pts_2 - np.mat(cam_2.project(structure_4d))
		stacked = np.hstack((err_1, err_2)).reshape(-1)

		# 2 * root-mean-square of the stacked residual vector.
		return 2 * np.sqrt(np.sum(np.power(stacked, 2)) / len(stacked))
Code example #3
0
File: py3DRec.py — Project: CarlosHVMoraes/py3DRec
# Script fragment: two-view setup — matched points, essential matrix, pose.

#Camera.myCamera.showProjectiveView(Xh_1,'-r')
#Camera.myCamera.showProjectiveView(Xh_2,'-b')

# normalize the points according to the projection
#Xhn_1, Trh_1 = myC1.normalizePoints(Xh_1)
#Xhn_2, Trh_2 = myC2.normalizePoints(Xh_2)

#Camera.myCamera.showProjectiveView(xn_1,'-r')
#Camera.myCamera.showProjectiveView(xn_2,'-b')

#Xp_1 = np.hstack((Xh_1[:,0], Xh_1[:,1]))
#Xp_2 = np.hstack((Xh_2[:,0], Xh_2[:,1]))


# Camera 1 is placed at the origin with zero rotation.
myC1 = Camera.myCamera(k)
myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


# Return corresponding points between the two images.
# NOTE(review): spelled 'getMathingPoints' here but 'getMatchingPoints'
# elsewhere in this project — confirm which name actually exists.
Xp_1, Xp_2 = clsReconstruction.getMathingPoints('b4.jpg','b5.jpg','k_cam_hp.dat')


# Evaluate the essential matrix using the camera parameters (original points).
# NOTE(review): cv2.FM_RANSAC is passed where findEssentialMat expects its
# `method` argument — it shares cv2.RANSAC's value, but confirm intent.
E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.FM_RANSAC)

# Evaluate the fundamental matrix (using the normalized points).
#F, mask = cv2.findFundamentalMat(Xp_1,Xp_2,cv2.FM_RANSAC)	
#ki = np.linalg.inv(k)

# Decompose E into the two candidate rotations and the translation direction.
R1, R2, t = cv2.decomposeEssentialMat(E)
Code example #4
0
	def sparceRecostructionTrueCase(file1,file2,kdef):
		"""Sparse two-view 3-D reconstruction with bundle adjustment.

		file1, file2 -- paths of the two input images
		kdef         -- path of the serialized camera-intrinsics matrix
		Side effects: opens two windows overlaying the matched points, the
		initial reprojection and the optimised reprojection, then blocks
		until a key is pressed.
		"""
		# Intrinsics and their inverse.
		k = np.mat(clsReconstruction.loadData(kdef))
		ki = np.linalg.inv(k)

		im_1 = cv2.imread(file1)
		im_2 = cv2.imread(file2)

		# Grayscale copies (only used by the commented-out epipolar drawing).
		im_b1 = cv2.cvtColor(im_1,cv2.COLOR_RGB2GRAY)
		im_b2 = cv2.cvtColor(im_2,cv2.COLOR_RGB2GRAY)

		myC1 = Camera.myCamera(k)
		myC2 = Camera.myCamera(k)


		# Place camera 1 at the origin with zero rotation.
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


		# Return matching points between the two images.
		Xp_1, Xp_2 = clsReconstruction.getMatchingPoints(file1,file2,kdef,30)


		# Evaluate the essential matrix using the camera parameters
		# (original, unnormalised points).
		# NOTE(review): cv2.FM_RANSAC is passed where findEssentialMat expects
		# its `method` argument — it shares cv2.RANSAC's value; confirm intent.
		E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.FM_RANSAC)


		# Evaluate the fundamental matrix to get the epipolar lines.
		# Since we already know the camera intrinsics, it is better to derive
		# F from E (F = K^-T E K^-1) than from the 8-point routine.
		F = ki.T*np.mat(E)*ki

		 
		# Retrieve R and t from E.
		retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)
		

		# Place camera 2 at the recovered pose.
		myC2.projectiveMatrix(np.mat(t),R)


		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,F,im_1,im_2)


		# Triangulate points (rows are homogeneous 4-D points).
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T


		# Make them Euclidean.
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		# Evaluate the initial (pre-optimisation) reprojection.
		Xh_Reprojection_1 = myC1.project(Str_4D)
		Xh_Reprojection_2 = myC2.project(Str_4D)


		# Three ways to carry out the bundle adjustment; here R, t and K are
		# the parameters. Using the points as parameters is too time
		# consuming, although the results are much better.
		#nR,nt, R0, R1 = clsReconstruction.bundleAdjustment(Str_4D,Xp_1,Xp_2,k,R,t)
		#Str_4D, nR,nt, R0, R1 = clsReconstruction.bundleAdjustmentwithX(Str_4D,Xp_1,Xp_2,k,R,t)	#### not working right now... 

		nk, nR, nt, R0, R1 = clsReconstruction.bundleAdjustmentwithK(Str_4D,Xp_1,Xp_2,k,R,t)
		print('old value {0:.3f}, optimized pose: {1:.3f} \n'.format(R0,R1))
		nki = np.linalg.inv(nk)


		# Re-evaluate the essential and fundamental matrices from the
		# optimised pose: E = [t]_x R, F = K^-T E K^-1.
		nE = clsReconstruction.skew(nt)*np.mat(nR)
		nF = nki.T*np.mat(nE)*nki


		# Since the intrinsics changed, reinitialise both cameras; the
		# projective matrices must be rebuilt from the optimised values.
		myC1 = Camera.myCamera(nk)
		myC2 = Camera.myCamera(nk)
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])



		# Re-evaluate all variables based on the new values of nR and nt.
		myC2.projectiveMatrix(np.mat(nt),nR)
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		
		#Camera.myCamera.show3Dplot(Str_3D)
		# Reprojection after optimisation.
		Xh_Opt_1 = myC1.project(Str_4D)#.reshape(-1,2)
		Xh_Opt_2 = myC2.project(Str_4D)#.reshape(-1,2)


		# POSSIBLE IMPLEMENTATION: find residuals bigger than a threshold
		# value and optimise their location in R3.

		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,nF,im_b1,im_b2)

		# Overlay: red = matched points, green = initial reprojection,
		# cyan = optimised reprojection (BGR tuples).
		im = clsReconstruction.drawPoints(im_1,Xp_1,(50,50,250))
		im = clsReconstruction.drawPoints(im,Xh_Reprojection_1,(50,150,100))
		im = clsReconstruction.drawPoints(im,Xh_Opt_1,(250,250,50))

		im2 = clsReconstruction.drawPoints(im_2,Xp_2,(50,50,250))
		im2 = clsReconstruction.drawPoints(im2,Xh_Reprojection_2,(50,150,100))
		im2 = clsReconstruction.drawPoints(im2,Xh_Opt_2,(250,250,50))


		cv2.imshow("im",im)
		cv2.imshow("im2",im2)
		cv2.waitKey(0)