def f_IMU(self, X, dt, dt2, accel, ori, isMoving, noise):
        """ Transition model
		- 状態方程式
			x_t = f(x_t-1, u) + w
			w ~ N(0, sigma)
		"""

        # Clamp velocity on axes flagged as not moving
        for i in range(3):
            if not isMoving[i]:
                X.v[i] = 0.0

        X_new = Particle()
        X_new.landmarks = X.landmarks

        # Transition with noise (only x,v)
        X_new.x = X.x + dt * X.v + dt2 * X.a + dt2 * noise
        X_new.v = X.v + dt * X.a + dt * noise
        X_new.a = accel
        X_new.o = ori


        return X_new
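
A minimal usage sketch (assumed setup, not part of the original class): propagate one particle through f_IMU with per-axis Gaussian noise, matching the docstring's w ~ N(0, sigma). Here pf stands for the surrounding filter instance, and dt2 is taken to be the 0.5*dt^2 coefficient implied by the constant-acceleration model.

import numpy as np

dt = 0.01                 # IMU sampling period [s] (assumed)
dt2 = 0.5 * dt * dt       # coefficient of the acceleration term
sigma = 0.1               # assumed process-noise std. dev. per axis

X = Particle()            # Particle from the surrounding module
X.landmarks = {}
X.x = np.zeros(3); X.v = np.zeros(3); X.a = np.zeros(3); X.o = np.zeros(3)

accel = np.array([0.0, 0.0, 9.8])        # accelerometer reading
ori = np.zeros(3)                        # orientation estimate
isMoving = [True, True, False]           # z axis treated as stationary
noise = np.random.normal(0.0, sigma, 3)  # w ~ N(0, sigma) per axis

X_next = pf.f_IMU(X, dt, dt2, accel, ori, isMoving, noise)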
    def predictionAndUpdateOneParticle_firsttime(self, X, dt, dt2, keypoints,
                                                 step, P):
        """
		FastSLAM1.0
		"""

        weight = 0.0  # weight (return value); stays 0.0 on the first frame

        # Pose prediction
        X_ = Particle()
        X_.landmarks = X.landmarks
        X_.x = X.x + dt * X.v + dt2 * X.a
        X_.v = X.v + dt * X.a
        X_.a = X.a
        X_.o = X.o

        for keypoint in keypoints:
            # previous landmark id
            prevLandmarkId = (step - 1) * 10000 + keypoint.prevIndex
            # new landmark id
            landmarkId = step * 10000 + keypoint.index
            # Has this landmark been observed before?
            if prevLandmarkId not in X_.landmarks:
                # First observation:
                # initialize the landmark and append it to the particle
                landmark = Landmark()
                landmark.init(X_, keypoint, P, self.focus)
                X_.landmarks[landmarkId] = landmark
            else:
                print("Error on pf_step_camera_firsttime")

        self.count += 1

        return X_, weight
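
The landmark bookkeeping above packs a keypoint's frame number and index into one integer key via step * 10000 + index, so a match can look up the previous frame's entry. A small sketch of that scheme (make_landmark_id is an illustrative helper; the stride assumes fewer than 10000 keypoints per frame):

STEP_STRIDE = 10000  # assumed upper bound on keypoints per frame

def make_landmark_id(step, index):
    # Encode (frame step, keypoint index) as a single dictionary key.
    return step * STEP_STRIDE + index

# Keypoint 42 in frame 3, matched to keypoint 40 in frame 2:
prevLandmarkId = make_landmark_id(2, 40)  # 20040
landmarkId = make_landmark_id(3, 42)      # 30042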
    def f_IMU(self, X, dt, dt2, accel, ori, noise):
        """ Transition model
		- 状態方程式
			x_t = f(x_t-1, u) + w
			w ~ N(0, sigma)
		"""

        X_new = Particle()
        X_new.landmarks = X.landmarks

        # Transition with noise (position only; velocity-noise variants kept below)
        X_new.x = X.x + dt * X.v + dt2 * X.a + noise
        X_new.v = X.v + dt * X.a
        #X_new.v = X.v + dt*X.a + noise*25
        #X_new.v = X.v + dt*X.a + noise*50
        X_new.a = accel
        X_new.o = ori

        return X_new
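
This variant also leaves the noise draw to the caller; a sketch of drawing it from a full 3-D Gaussian, where the covariance Q is an assumed value and pf again stands for the filter instance:

import numpy as np

Q = np.diag([0.05, 0.05, 0.05]) ** 2             # assumed process-noise covariance
w = np.random.multivariate_normal(np.zeros(3), Q)

# Position only is perturbed here; the commented velocity-noise lines
# above are alternative tunings.
X_next = pf.f_IMU(X, dt, dt2, accel, ori, w)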
    def predictionAndUpdateOneParticle(self, X, dt, dt2, noise, keypoints,
                                       step, P):
        """
		FastSLAM1.0
		"""

        weight = 0.0  # weight (return value)
        weights = []
        count_of_known_keypoints = 0

        # Pose prediction
        X_ = Particle()
        X_.landmarks = X.landmarks
        X_.x = X.x + dt * X.v + dt2 * X.a + self.dt2_camera * noise
        X_.v = X.v + dt * X.a + self.dt_camera * noise
        X_.a = X.a
        X_.o = X.o

        for keypoint in keypoints:
            # previous landmark id
            prevLandmarkId = (step - 1) * 10000 + keypoint.prevIndex
            # new landmark id
            landmarkId = step * 10000 + keypoint.index
            # Has this landmark been observed before?
            if prevLandmarkId not in X_.landmarks:
                # First observation:
                # initialize the landmark and append it to the particle
                landmark = Landmark()
                landmark.init(X_, keypoint, P, self.focus)
                X_.landmarks[landmarkId] = landmark
            else:
                # Already observed
                count_of_known_keypoints += 1
                X_.landmarks[landmarkId] = X_.landmarks[prevLandmarkId]
                del X_.landmarks[prevLandmarkId]
                # Observation z
                z = np.array([keypoint.x, keypoint.y])
                # Calc h and H (Jacobian matrix of h)
                h, Hx, H = X_.landmarks[landmarkId].calcObservation(X_, self.focus)
                # Kalman filter (Landmark update)
                X_.landmarks[landmarkId].mu, X_.landmarks[landmarkId].sigma, S, Sinv = \
                    KF.execEKF1Update(z, h, X_.landmarks[landmarkId].mu,
                                      X_.landmarks[landmarkId].sigma, H, self.R)
                # Calc weight
                w = 0.0
                try:
                    w = (1.0 / math.sqrt(np.linalg.det(2.0 * math.pi * S))) \
                        * np.exp(-0.5 * (z - h).T.dot(Sinv.dot(z - h)))
                except Exception as e:
                    print("Error on calc weight: " + str(e))
                weights.append(w)
                ###############################
                if (self.count == 0):
                    #print("z "),
                    #print(z)
                    #print("h "),
                    #print(h)
                    #print((math.sqrt( np.linalg.det(2.0 * math.pi * S) )))
                    pass
                ###############################

        for w in weights:
            weight += w
        """
		if(count_of_known_keypoints > 0):
			for i in xrange(len(weights)):
				if(i==0):
					weight = (100*w)
				else:
					weight *= (100*w)
		"""

        ###############################
        #print("weight "+str(weight))
        if (self.count == 0):
            #print("weight "+str(weight))
            pass
        ###########################
        self.count += 1
        ###########################
        return X_, weight
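
KF.execEKF1Update is an external helper, but its return values (updated mu and sigma plus S and its inverse) match the standard EKF measurement update that the other variants in this file inline. A sketch of what such a helper plausibly computes (the name exec_ekf1_update below is illustrative, not the repo's API):

import numpy as np

def exec_ekf1_update(z, h, mu, sigma, H, R):
    # Standard EKF measurement update for one landmark.
    S = H.dot(sigma).dot(H.T) + R          # innovation covariance
    Sinv = np.linalg.inv(S)
    K = sigma.dot(H.T).dot(Sinv)           # Kalman gain
    mu = mu + K.dot(z - h)                 # corrected landmark mean
    sigma = sigma - K.dot(H).dot(sigma)    # corrected landmark covariance
    return mu, sigma, S, Sinv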
    def predictionAndUpdateOneParticle2(self, X, dt, dt2, keypoints, step, P):
        """
		FastSLAM2.0
		"""

        RSS = []  # residual sum of squares
        weight = 0.0  # weight (return value)
        weights = []
        count_of_known_keypoints = 0
        x_diff_sum = np.array([0.0, 0.0, 0.0])
        x_diffs = []

        # Pose prediction
        X_ = Particle()
        X_.landmarks = X.landmarks
        X_.x = X.x + dt * X.v + dt2 * X.a
        X_.v = X.v + dt * X.a
        X_.a = X.a
        X_.o = X.o

        for keypoint in keypoints:
            #############################
            start_time_ = time.time()  # time.clock() was removed in Python 3.8
            #############################
            # previous landmark id
            prevLandmarkId = (step - 1) * 10000 + keypoint.prevIndex
            # new landmark id
            landmarkId = step * 10000 + keypoint.index
            # Has this landmark been observed before?
            if prevLandmarkId not in X_.landmarks:
                # First observation:
                # initialize the landmark and append it to the particle
                landmark = Landmark()
                landmark.init(X, keypoint, P, self.focus)
                X_.landmarks[landmarkId] = landmark
            else:
                # Already observed
                count_of_known_keypoints += 1
                X_.landmarks[landmarkId] = X_.landmarks[prevLandmarkId]
                del X_.landmarks[prevLandmarkId]

                # Actual observation z
                z = np.array([keypoint.x, keypoint.y])

                # Observation prediction:
                #  Calc h(x), Hx, Hm (Jacobian matrix of h with respect to X and Landmark)
                z__, Hx, Hm = X_.landmarks[landmarkId].calcObservation(
                    X_, self.focus)

                # Kalman filter for the pose
                """
				mu_ = copy.deepcopy(X_.landmarks[landmarkId].mu)
				sigma_ = copy.deepcopy(X_.landmarks[landmarkId].sigma)
				
				S = Hm.dot(sigma_.dot(Hm.T)) + self.R
				L = S + Hx.dot(self.Q.dot(Hx.T))
				Sinv = np.linalg.inv(S)
				Linv = np.linalg.inv(L)
				sigmax = np.linalg.inv( Hx.T.dot(Sinv.dot(Hx)) + np.linalg.inv(self.Q) )
				mux = sigmax.dot(Hx.T.dot(Sinv.dot(z - z__)))
				
				# 姿勢のサンプリング sampling of position
				x_diff = np.random.multivariate_normal(mux, sigmax)
				x_diffs.append(x_diff)
				x_diff_sum += x_diff
				
				# 計測再予測 reprediction of observation
				z_ = X_.landmarks[landmarkId].h(X_.x + x_diff, X_.o, self.focus)
				
				# ランドマークの予測 prediction of landmark
				K = X_.landmarks[landmarkId].sigma.dot(Hm.T.dot(Sinv))
				X_.landmarks[landmarkId].mu += K.dot(z - z_)
				X_.landmarks[landmarkId].sigma = X_.landmarks[landmarkId].sigma - K.dot(Hm.dot(X_.landmarks[landmarkId].sigma))
				"""

                S = Hm.dot(X_.landmarks[landmarkId].sigma.dot(Hm.T)) + self.R
                L = S + Hx.dot(self.Q.dot(Hx.T))
                Sinv = np.linalg.inv(S)
                Linv = np.linalg.inv(L)
                sigmax = np.linalg.inv(
                    Hx.T.dot(Sinv.dot(Hx)) + np.linalg.inv(self.Q))
                mux = sigmax.dot(Hx.T.dot(Sinv.dot(z - z__)))

                # Sample the pose perturbation
                x_diff = np.random.multivariate_normal(mux, sigmax)
                x_diffs.append(x_diff)
                x_diff_sum += x_diff

                # Re-predict the observation
                z_, XYZ = X_.landmarks[landmarkId].h(X_.x + x_diff, X_.o,
                                                     self.focus)

                # Landmark update
                K = X_.landmarks[landmarkId].sigma.dot(Hm.T.dot(Sinv))
                X_.landmarks[landmarkId].mu += K.dot(z - z_)
                X_.landmarks[landmarkId].sigma = X_.landmarks[landmarkId].sigma \
                    - K.dot(Hm.dot(X_.landmarks[landmarkId].sigma))

                # Weight calculation
                rss_ = (z - z_).T.dot(Linv.dot(z - z_))
                RSS.append(rss_)
                w = (1.0 / math.sqrt(np.linalg.det(2.0 * math.pi * L))) \
                    * np.exp(-0.5 * rss_)
                weights.append(w)

                ###############################
                end_time_ = time.time()
                if (self.count == 0):
                    #print(XYZ)
                    #print(x_diff)
                    #print ""+str(landmarkId)+" update time = %f" %(end_time_-start_time_)
                    pass
                ###############################

        if count_of_known_keypoints > 0:
            # Adopt the weight and pose correction with the smallest residual
            max_index = RSS.index(min(RSS))
            weight = weights[max_index]
            weight *= 1000
            X_.x += x_diffs[max_index]
            X_.v += (2.0 * x_diffs[max_index]) / self.dt_camera

        ###############################
        #print("weight "+str(weight))
        #if(self.count == 0):
        #print("weight "+str(weight))
        ###########################
        self.count += 1
        ###########################

        return X_, weight
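
The sigmax/mux lines above are the FastSLAM 2.0 proposal: the pose perturbation is sampled from a Gaussian that folds the current measurement back into the motion prior. A toy numeric sketch of just those two lines, with stand-in values for the pose Jacobian Hx, innovation covariance S, and motion noise Q (all assumed):

import numpy as np

Hx = np.array([[1.0, 0.0, 0.2],     # assumed Jacobian of h w.r.t. the pose
               [0.0, 1.0, -0.1]])
S = np.diag([2.0, 2.0])             # assumed innovation covariance
Q = np.diag([0.01, 0.01, 0.01])     # assumed pose (motion) noise
innovation = np.array([0.5, -0.3])  # z - z__

Sinv = np.linalg.inv(S)
sigmax = np.linalg.inv(Hx.T.dot(Sinv).dot(Hx) + np.linalg.inv(Q))
mux = sigmax.dot(Hx.T.dot(Sinv).dot(innovation))
x_diff = np.random.multivariate_normal(mux, sigmax)  # sampled pose correction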
    def predictionAndUpdateOneParticle(self, X, dt, dt2, noise, keypoints,
                                       step, P, X1, P1, dt_camera, noise_o):
        """
		FastSLAM1.0
		"""

        weight = 0.0  # weight (return value)
        weights = []
        count_of_first_observation = 0

        # Pose prediction
        X_ = Particle()
        X_.landmarks = X.landmarks
        X_.x = X.x + dt * X.v + dt2 * X.a + noise
        #X_.v = X.v + dt*X.a
        #X_.v = X.v + dt*X.a + noise*25
        #X_.v = X.v + dt*X.a + noise*50
        X_.a = X.a
        X_.o = X.o
        #X_.o = X.o + noise_o

        # Velocity adjustment: scaled finite-difference velocity over the camera interval
        #X_.v = ((X_.x - X1.x)/dt_camera)
        X_.v = ((X_.x - X1.x) / dt_camera) * 0.25
        #X_.v = ((X_.x - X1.x)/dt_camera)*0.5

        # Computation for the coplanarity-condition model
        xyz = np.array(
            [X_.x[0] - X1.x[0], X_.x[1] - X1.x[1], X_.x[2] - X1.x[2]])

        # Keep all previous-landmark keys; they are deleted at the end
        prevLandmarkKeys = list(X_.landmarks.keys())

        for keypoint in keypoints:

            # ---------------------------- #
            # Calc weight of Inverse Depth #
            # ---------------------------- #
            # previous landmark id
            prevLandmarkId = (step - 1) * 10000 + keypoint.prevIndex
            # new landmark id
            landmarkId = step * 10000 + keypoint.index
            # Has this landmark been observed before?
            if prevLandmarkId not in X_.landmarks:
                # First observation:
                # initialize the landmark and append it to the particle
                landmark = Landmark()
                landmark.initPrev(X1, keypoint, P1, self.focus)
                X_.landmarks[landmarkId] = landmark
                if (self.count == 0):
                    count_of_first_observation += 1
            else:
                # Already observed
                X_.landmarks[landmarkId] = X_.landmarks[prevLandmarkId]
                #del X_.landmarks[prevLandmarkId]
            # Update landmark and calc weight
            # Observation z
            z = np.array([keypoint.x, keypoint.y])
            # Calc h and H (Jacobian matrix of h)
            h, Hx, H = X_.landmarks[landmarkId].calcObservation(X_, self.focus)
            # Kalman filter (Landmark update)
            S = H.dot(X_.landmarks[landmarkId].sigma.dot(H.T)) + self.R
            Sinv = np.linalg.inv(S)
            K = X_.landmarks[landmarkId].sigma.dot(H.T.dot(Sinv))
            X_.landmarks[landmarkId].mu += K.dot(z - h)
            X_.landmarks[landmarkId].sigma = X_.landmarks[landmarkId].sigma \
                - K.dot(H.dot(X_.landmarks[landmarkId].sigma))
            # Calc weight
            w = 0.0
            try:
                #w = (1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * S) ))) * np.exp( -0.5 * ( (z-h).T.dot(Sinv.dot(z-h)) ) )
                #w = (1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * self.R) ))) * np.exp( -0.5 * ( (z-h).T.dot(self.Rinv.dot(z-h)) ) )
                # log likelihood
                #w = np.log(1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * self.R) ))) + ( -0.5 * ( (z-h).T.dot(self.Rinv.dot(z-h)) ) )
                w = (-0.5 * ((z - h).T.dot(self.Rinv.dot(z - h))))
            except Exception as e:
                print("Error on calc inverse weight: " + str(e))
                w = 0.0

            # -------------------------- #
            # Calc weight of Coplanarity #
            # -------------------------- #
            # Generate uvw1 (time:t-1)
            # Camera coordinates -> device coordinates
            uvf1 = np.array([keypoint.x1, -keypoint.y1, -self.focus])
            rotX = Util.rotationMatrixX(X1.o[0])
            rotY = Util.rotationMatrixY(X1.o[1])
            rotZ = Util.rotationMatrixZ(X1.o[2])
            # uvw1 = R(z)R(y)R(x)uvf1
            uvw1 = np.dot(rotZ, np.dot(rotY, np.dot(rotX, uvf1)))
            uvw1 /= 100.0  # Rescale to reduce numerical error; this does not affect the estimate.
            # Generate uvw2 (time:t)
            # Camera coordinates -> device coordinates
            uvf2 = np.array([keypoint.x, -keypoint.y, -self.focus])
            rotX = Util.rotationMatrixX(X_.o[0])
            rotY = Util.rotationMatrixY(X_.o[1])
            rotZ = Util.rotationMatrixZ(X_.o[2])
            # uvw2 = R(z)R(y)R(x)uvf2
            uvw2 = np.dot(rotZ, np.dot(rotY, np.dot(rotX, uvf2)))
            uvw2 /= 100.0  # Rescale to reduce numerical error; this does not affect the estimate.
            # Generate coplanarity matrix
            coplanarity_matrix = np.array([xyz, uvw1, uvw2])
            # Calc determinant
            determinant = np.linalg.det(coplanarity_matrix)
            # Weight
            w_coplanarity = 0.0
            try:
                #w_coplanarity = (1.0 / (math.sqrt( 2.0 * math.pi * self.noise_coplanarity**2 ))) * np.exp((determinant**2) / (-2.0 * (self.noise_coplanarity**2)) )
                # log likelihood
                #w_coplanarity = np.log(1.0 / (math.sqrt( 2.0 * math.pi * self.noise_coplanarity**2 ))) + ((determinant**2) / (-2.0 * (self.noise_coplanarity**2)) )
                w_coplanarity = ((determinant**2) /
                                 (-2.0 * (self.noise_coplanarity**2)))
            except Exception as e:
                print("Error on calc coplanarity weight: " + str(e))
                w_coplanarity = 0.0

            # --------------------------- #
            # inverse depth * coplanarity #
            # --------------------------- #
            if (self.count == 0):
                #print(w),
                #print(w_coplanarity)
                pass

            w = w + w_coplanarity  # use inverse depth * coplanarity log likelihood
            #w = w_coplanarity      # use only coplanarity log likelihood
            #w = w                  # use only inverse depth log likelihood

            weights.append(w)

        # ----------------------------------- #
        # sum log likelihood of all keypoints #
        # ----------------------------------- #
        weight = sum(weights)

        # ----------------------------- #
        # Average of log likelihood sum #
        # ----------------------------- #
        weight /= float(len(weights))

        ###############################
        #print("weight "+str(weight))
        if (self.count == 0):
            #print("weight "+str(weight))
            #print("first_ratio "+str(float(count_of_first_observation)/float(len(keypoints)))),
            #print("("+str(count_of_first_observation)+"/"+str(len(keypoints))+")")
            pass
        ###########################

        # Delete all landmarks carried over from the previous step
        for key in prevLandmarkKeys:
            del X_.landmarks[key]

        self.count += 1

        return X_, weight
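
The coplanarity weight above scores each match by how close the camera displacement and the two viewing rays come to lying in one plane (determinant near zero). A self-contained numeric sketch with an assumed focal length and identity orientation, so the Util rotation helpers are not needed:

import numpy as np

focus = 500.0             # assumed focal length [px]
noise_coplanarity = 0.05  # assumed coplanarity noise scale

xyz = np.array([0.10, 0.00, 0.02])                # camera displacement t-1 -> t
uvw1 = np.array([12.0, -34.0, -focus]) / 100.0    # ray at t-1 (identity rotation)
uvw2 = np.array([15.0, -30.0, -focus]) / 100.0    # ray at t

determinant = np.linalg.det(np.array([xyz, uvw1, uvw2]))
log_w = (determinant ** 2) / (-2.0 * noise_coplanarity ** 2)  # log likelihood
print(log_w)  # 0 when perfectly coplanar; more negative otherwise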