def callback(self, data):
    # Simulation: read the positions of the landmarks from Gazebo in the
    # absolute frame and transform them into the robot's frame.
    # Only keep the landmarks that fall within the sensor's field of view.
    # Simulation labels: Purple: 0, Orange: 1, Yellow: 2, Blue: 3, Green: 4, Red: 5, Black: 6, Turquoise: 7
    msg_landmarks = Landmarks_Msg()
    # robot pose -> ground-truth pose from Gazebo
    robot_pose_gt = data.pose[10]
    yaw = transformations.euler_from_quaternion([robot_pose_gt.orientation.x,
                                                 robot_pose_gt.orientation.y,
                                                 robot_pose_gt.orientation.z,
                                                 robot_pose_gt.orientation.w])[2]
    robot_pose_abs_gt = [[robot_pose_gt.position.x],
                         [robot_pose_gt.position.y],
                         [pi2pi(yaw)]]
    marker_array_msg = MarkerArray()
    # in gazebo/model_states the landmark cylinders have indices in the range [2, 9]
    for i in range(0, 8):
        landmark_position_abs_gt = [data.pose[i + 2].position.x,
                                    data.pose[i + 2].position.y]
        # convert from the absolute to the relative coordinate frame using the
        # ground-truth pose of the robot
        [landmark_position_rel_gt, _, _] = Absolute2RelativeXY(robot_pose_abs_gt,
                                                               landmark_position_abs_gt)
        # check that the landmark is in front of the robot and within the field of view
        if landmark_position_rel_gt[0][0] > 0.3:
            tan_val = abs(landmark_position_rel_gt[1][0] / landmark_position_rel_gt[0][0])
            angle = math.atan(min(tan_val, 4))
            # if angle < 1.047:
            if angle < 0.47878508936:
                # standard deviation proportional to the distance to the landmark
                s_landmark_x = std_dev_landmark_x * landmark_position_rel_gt[0][0]
                s_landmark_y = std_dev_landmark_y * landmark_position_rel_gt[1][0]
                # add noise to the ground-truth position of the landmark
                landmark_position_rel_x = landmark_position_rel_gt[0][0] + s_landmark_x * np.random.standard_normal()
                landmark_position_rel_y = landmark_position_rel_gt[1][0] + s_landmark_y * np.random.standard_normal()
                # populate the position and covariance of the landmark
                msg_landmark = Landmark_Msg()
                msg_landmark.label = i
                msg_landmark.x = landmark_position_rel_x
                msg_landmark.y = landmark_position_rel_y
                msg_landmark.s_x = s_landmark_x ** 2   # variance in x
                msg_landmark.s_y = s_landmark_y ** 2   # variance in y
                # add it to the landmarks message
                msg_landmarks.landmarks.append(msg_landmark)
                marker = self.create_landmark_marker(i,
                                                     landmark_position_rel_gt[0][0],
                                                     landmark_position_rel_gt[1][0])
                marker_array_msg.markers.append(marker)
    if len(msg_landmarks.landmarks) > 0:
        self.pub_landmarks.publish(msg_landmarks)
        self.pub_landmark_markers.publish(marker_array_msg)
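# The callback above relies on helpers and configuration that are not shown in
# this section. The following is a minimal sketch, not the original code:
# pi2pi() is assumed to wrap an angle to [-pi, pi], and std_dev_landmark_x /
# std_dev_landmark_y are assumed to be module-level noise parameters. The
# publisher/subscriber wiring shown in the trailing comments is likewise an
# assumption about how the node is set up; only the topic /gazebo/model_states
# and the names pub_landmarks / pub_landmark_markers come from the code above.
import math


def pi2pi(angle):
    # wrap an angle to the interval [-pi, pi]
    return (angle + math.pi) % (2.0 * math.pi) - math.pi


# sketch of the wiring assumed inside the node's __init__:
#   self.pub_landmarks = rospy.Publisher('landmarks', Landmarks_Msg, queue_size=1)
#   self.pub_landmark_markers = rospy.Publisher('landmark_markers', MarkerArray, queue_size=1)
#   rospy.Subscriber('/gazebo/model_states', ModelStates, self.callback)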
def sense(self, robotCurrentAbs, landmarkAbs):
    if self.senseType == 'Vision':
        [measurement, H1, H2] = Absolute2RelativeXY(robotCurrentAbs, landmarkAbs)
    else:
        raise ValueError('Unknown Measurement Type')
    return measurement, H1, H2
def sense(self, current_robot_pose_abs, landmark_position_abs):
    """Execute a sense step: given the current pose of the robot and the
    position of a landmark, both expressed in the absolute frame, compute the
    position of the landmark in the robot's frame of reference.

    :param current_robot_pose_abs: current pose of the robot in the absolute frame of reference
    :type current_robot_pose_abs: np.array
    :param landmark_position_abs: position of a landmark in the absolute frame of reference
    :type landmark_position_abs: np.array
    :return: the position of the landmark in the robot's frame of reference and the Jacobians
    :rtype: list
    """
    if self.sensor_type == 'Vision':
        [measurement, H1, H2] = Absolute2RelativeXY(current_robot_pose_abs,
                                                    landmark_position_abs)
    else:
        raise ValueError('Unknown Measurement Type')
    return measurement, H1, H2
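# Both sense() variants above delegate to Absolute2RelativeXY(), whose body is
# not shown in this section. The sketch below captures what it is assumed to
# compute, based on how it is called: the landmark position expressed in the
# robot frame, r = R(theta)^T (l - p), together with the Jacobians
# H1 = dr/d(pose) and H2 = dr/d(landmark). The argument layout
# ([[x], [y], [theta]] for the pose, [x, y] for the landmark) follows the calls
# made above; the exact return types are an assumption.
import math


def Absolute2RelativeXY_sketch(robot_pose_abs, landmark_position_abs):
    x, y, theta = robot_pose_abs[0][0], robot_pose_abs[1][0], robot_pose_abs[2][0]
    dx = landmark_position_abs[0] - x
    dy = landmark_position_abs[1] - y
    c, s = math.cos(theta), math.sin(theta)
    # landmark position in the robot frame
    measurement = [[c * dx + s * dy],
                   [-s * dx + c * dy]]
    # Jacobian with respect to the robot pose (x, y, theta)
    H1 = [[-c, -s, -s * dx + c * dy],
          [s, -c, -c * dx - s * dy]]
    # Jacobian with respect to the landmark position (x, y)
    H2 = [[c, s],
          [-s, c]]
    return measurement, H1, H2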
def update(self, measurement, measurementCovariance, new,
           currentStateMean=None, currentStateCovariance=None,
           currentRobotAbs=None, currentRobotCov=None):
    global seenLandmarks_
    global dimR_
    global seenLandmarksX_
    global it
    # get the current robot pose and covariance if they were not passed in
    if currentRobotAbs is None:
        currentRobotAbs = self.robot.getPose()
    if currentRobotCov is None:
        currentRobotCov = self.robot.getCovariance()
    label = measurement[2]
    # landmark absolute position estimate given the current pose and the measurement (robot.inverseSense)
    [landmarkAbs, G1, G2] = self.robot.inverseSense(currentRobotAbs, measurement)
    # get the KF state mean and covariance
    if currentStateMean is None:
        stateMean = np.array(self.stateMean)
    else:
        stateMean = currentStateMean
    if currentStateCovariance is None:
        stateCovariance = np.array(self.stateCovariance)
    else:
        stateCovariance = currentStateCovariance
    print('###############################')
    if new:
        # new landmark: augment stateMean and stateCovariance
        stateMean = np.concatenate((stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
        Prr = self.robot.getCovariance()
        if len(seenLandmarks_) == 1:
            Plx = np.dot(G1, Prr)
        else:
            lastStateCovariance = KalmanFilter.getStateCovariance(self)
            Prm = lastStateCovariance[0:3, 3:]
            Plx = np.dot(G1, np.bmat([[Prr, Prm]]))
        Pll = (np.array(np.dot(np.dot(G1, Prr), np.transpose(G1)))
               + np.array(np.dot(np.dot(G2, measurementCovariance), np.transpose(G2))))
        P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
        stateCovariance = P
    elif label == seenLandmarks_[0]:
        # re-anchor the robot x, y from the first seen landmark's stored
        # absolute position and the raw relative measurement
        landmarkPos = [0, 0]
        landmarkPos[0] = stateMean[dimR_][0]
        landmarkPos[1] = stateMean[dimR_ + 1][0]
        stateMean[0, 0] = landmarkPos[0] - measurement[0]
        stateMean[1, 0] = landmarkPos[1] - measurement[1]
        self.robot.setPose(stateMean[0:3][0:3])
    else:
        # old landmark: stateMean & stateCovariance stay as they are here and
        # are corrected below by the Kalman gain
        # calculate the expected measurement
        vec = mapping(seenLandmarks_.index(label) + 1)
        expectedMeas = [0, 0]
        print('vec:', vec)
        print('stateMean:', stateMean.shape)
        print('label:', label)
        print('new:', new)
        expectedMeas[0] = np.around(stateMean[dimR_ + vec[0] - 1][0], 3)
        expectedMeas[1] = np.around(stateMean[dimR_ + vec[1] - 1][0], 3)
        [landmarkRelative, _, _] = Absolute2RelativeXY(currentRobotAbs, expectedMeas)
        expectedRel = [np.around(landmarkRelative[0][0], 3),
                       np.around(landmarkRelative[1][0], 3)]
        Z = [np.around(measurement[0], 3), np.around(measurement[1], 3)]
        # innovation term y = Z - expected measurement
        y = np.array(RelativeLandmarkPositions(Z, expectedRel))
        # build H = [Hr, 0, ..., 0, Hl, 0, ..., 0]; the position of the Hl block
        # depends on when the landmark was first seen (H is the observation matrix,
        # often written C)
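        # Block layout of H for this observation (descriptive comment, derived
        # from the construction below):
        #   - Hr = G1 (2x3): derivative of the measurement w.r.t. the robot pose,
        #   - one 2x2 zero block per landmark that is not being observed,
        #   - Hl = G2 (2x2): derivative w.r.t. the observed landmark, placed at the
        #     column block matching its position in seenLandmarks_,
        # giving H a shape of 2 x (3 + 2 * len(seenLandmarks_)).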
        H = np.reshape(G1, (2, 3))
        for i in range(0, seenLandmarks_.index(label)):
            H = np.bmat([[H, np.zeros([2, 2])]])
        H = np.bmat([[H, np.reshape(G2, (2, 2))]])
        for i in range(0, len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
            H = np.bmat([[H, np.zeros([2, 2])]])
        measurementCovariance = np.array(measurementCovariance)
        # innovation covariance S = H P H^T + R
        try:
            S = np.array(np.add(np.dot(np.dot(H, stateCovariance), np.transpose(H)),
                                measurementCovariance))
        except ValueError:
            print('Value error while computing S')
            print('H shape:', H.shape)
            print('state covariance shape:', stateCovariance.shape)
            print('measurement covariance shape:', measurementCovariance.shape)
            return np.array(stateMean), np.array(stateCovariance)
        if (S < 0.000001).all():
            # S is numerically non-invertible: skip the update and keep the prior estimate
            print('Non-invertible S matrix')
            return np.array(stateMean), np.array(stateCovariance)
        # calculate the Kalman gain
        K = np.array(np.dot(np.dot(stateCovariance, np.transpose(H)), np.linalg.inv(S)))
        # compute the posterior mean
        posteriorStateMean = np.array(np.add(stateMean, np.dot(K, y)))
        # compute the posterior covariance
        kc = np.array(np.dot(K, H))
        kcShape = len(kc)
        posteriorStateCovariance = np.dot(np.subtract(np.eye(kcShape), kc), stateCovariance)
        # keep the robot heading a valid angle in the range [-pi, pi]
        posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])
        # updated robot pose and covariance
        robotPose = ([posteriorStateMean[0][0]],
                     [posteriorStateMean[1][0]],
                     [posteriorStateMean[2][0]])
        robotCovariance = posteriorStateCovariance[0:3, 0:3]
        # accept the update only if the corrected robot position stays inside the workspace
        if not (np.absolute(posteriorStateMean[0][0]) > 3.5
                or np.absolute(posteriorStateMean[1][0]) > 3.5):
            stateMean = posteriorStateMean
            stateCovariance = posteriorStateCovariance
            # set the robot pose
            self.robot.setPose(robotPose)
            # set the robot covariance
            self.robot.setCovariance(robotCovariance)
            print('update applied')
    # set the posterior state mean
    self.stateMean = stateMean
    # set the posterior state covariance
    self.stateCovariance = stateCovariance
    print('Robot Pose:', currentRobotAbs)
    vec = mapping(seenLandmarks_.index(label) + 1)
    land = [[np.around(stateMean[dimR_ + vec[0] - 1][0], 3)],
            [np.around(stateMean[dimR_ + vec[1] - 1][0], 3)]]
    # replace clearly diverged landmark estimates with fallback values inside the workspace
    if land[0][0] > 4:
        land[0][0] = 2.543
    if land[0][0] < -4:
        land[0][0] = -0.503
    if land[1][0] > 4:
        land[1][0] = 3.517
    if land[1][0] < -4:
        land[1][0] = -0.592
    landmark_abs_[int(label) - 1].append(land)
    seenLandmarksX_[int(label) - 1].append(np.around(stateMean[dimR_ + vec[0] - 1][0], 3))
    for i in range(0, len(landmark_abs_)):
        print('landmark absolute position:', i + 1, ',', np.median(landmark_abs_[i], 0))
    print('____END______')
    return np.array(stateMean), np.array(stateCovariance)
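# update() also relies on two small helpers that are not shown in this section.
# The sketch below captures what they are assumed to do, based purely on how
# they are used above: mapping(n) returns the 1-based offsets of the n-th seen
# landmark's (x, y) entries within the landmark block of the state vector, and
# RelativeLandmarkPositions() returns the column-vector difference used as the
# innovation term. Both bodies are assumptions, not the original code.
import numpy as np


def mapping_sketch(n):
    # n-th seen landmark (1-based) -> offsets of its x and y entries, so that
    # stateMean[dimR_ + offset - 1] indexes the corresponding state component
    return [2 * n - 1, 2 * n]


def RelativeLandmarkPositions_sketch(actual, expected):
    # innovation y = z - h(x), returned as a 2x1 column so that K.dot(y)
    # matches the shape of the state mean
    return np.subtract(np.array(actual, dtype=float),
                       np.array(expected, dtype=float)).reshape(-1, 1)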