def __get_scan(self, *args, **kwargs):
  packet = control.get_output("GetLDSScan", *args, **kwargs)

  # Get rid of help message.
  packet.pop("AngleInDegrees", None)

  # Add rotation speed.
  ret = {}
  ret["ROTATION_SPEED"] = float(packet["ROTATION_SPEED"])
  packet.pop("ROTATION_SPEED", None)

  # Discard any errors.
  for key in packet.keys():
    if int(packet[key][2]) == 0:
      # Convert the angle so that 0 deg is to the right, not up.
      angle = int(key)
      real_angle = angle + 90
      if real_angle > 359:
        real_angle -= 360
      ret[real_angle] = [int(x) for x in packet[key]]
    else:
      log.debug("Error %s in LDS reading for angle %s." % \
          (packet[key][2], key))

  return ret
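# For example, a raw LDS angle of 300 degrees becomes (300 + 90) - 360 = 30
# degrees in the returned dictionary, so that 0 degrees points to the robot's
# right rather than up.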
def spikes(scan):
  # How big the sum of the changes around a point must be for it to qualify as a
  # spike. (mm)
  spike_threshold = 1000

  # Keep the angles sorted so that adjacent list entries really are adjacent
  # readings. (Plain dict key order is arbitrary.)
  angles = sorted(scan.keys())
  distances = [scan[angle][0] for angle in angles]

  spikes = {}
  for i in range(0, len(distances)):
    center = distances[i]

    # Find the immediate surrounding distances.
    left = distances[i - 1]
    if i < len(distances) - 1:
      right = distances[i + 1]
    else:
      right = distances[0]

    change = (left - center) + (right - center)
    if change >= spike_threshold:
      angle = angles[i]
      log.debug("Found spike with change of %d and angle %d." % (change, angle))
      spikes[angle] = center

  return spikes
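# A minimal sketch of how the spike test above behaves, using made-up readings
# (the real scan dicts come from the LDS driver). Each value follows the
# [distance, ...] layout assumed elsewhere in this code, and only the distance
# at index 0 matters here. At angle 90 the reading drops from 2000 mm to 500 mm
# and back, so (left - center) + (right - center) = 3000 >= 1000 and the angle
# is reported as a spike:
#
#   fake_scan = {89: [2000, 100, 0], 90: [500, 100, 0], 91: [2000, 100, 0]}
#   spikes(fake_scan)  # -> {90: 500}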
def predict(self, dx, dy, dtheta):
  log.debug("Running SLAM prediction step...")

  prediction = self.__prediction_model(dx, dy, dtheta, 0)
  # Update the state.
  self.X[:3] = prediction
  log.debug("New state: %s." % (self.X))

  # Update the prediction jacobian.
  self.A = self.__prediction_jacobian(dx, dy)
  log.debug("New prediction jacobian: %s." % (self.A))

  # Compute the process noise.
  Q = self.__process_noise(dx, dy, dtheta)
  log.debug("Process noise: %s." % (Q))

  # Update covariance for robot position. (Standard EKF propagation:
  # P = A * P * A^T + Q.)
  self.P[0:3, 0:3] = np.dot(np.dot(self.A, self.P[0:3, 0:3]), self.A.T) + Q

  # Update feature cross-correlations.
  column = 3
  while column < self.P.shape[1]:
    self.P[0:3, column:(column + 2)] = \
        np.dot(self.A, self.P[0:3, column:(column + 2)])
    column += 2

  log.debug("New covariance matrix: %s." % (self.P))
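# For reference, the prediction step above is the usual EKF time update, with A
# the Jacobian of the motion model and Q the process noise:
#
#   X[0:3] = f(X[0:3], dx, dy, dtheta)
#   P_rr   = A * P_rr * A^T + Q    (robot covariance, upper-left 3x3 of P)
#   P_rl   = A * P_rl              (robot-landmark cross-covariances)
#
# The while loop is what applies the cross-covariance update, two columns per
# landmark.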
def check_landmark(self, location):
  # Check distance between this and all the known landmarks.
  best_landmark = None
  best_distance = sys.maxint
  for landmark in self.landmarks + self.new_landmarks:
    # Find the landmark that it's closest to.
    distance = utilities.distance(landmark.last_location, location)
    if distance < best_distance:
      best_distance = distance
      best_landmark = landmark

  if (best_landmark and best_landmark.validation_gate(location)):
    log.debug("Landmark at %s corresponds to landmark at %s." % \
        (location, best_landmark.last_location))

    best_landmark.sightings += 1
    best_landmark.last_location = location
    # Record the matched landmark, not the last one the loop happened to visit.
    self.seen_this_cycle.append(best_landmark)
  else:
    log.debug("Found a new landmark at %s." % (str(location)))

    landmark = Landmark()
    landmark.last_location = location
    self.new_this_cycle.append(landmark)
def validation_gate(self, location):
  # TODO (danielp): I'm not sure if I'm doing this right.
  z = self.__range_and_bearing(location)
  v = z - self.h
  log.debug("Range and bearing difference: %s." % (v))

  value = np.dot(np.dot(np.reshape(v, (1, -1)), np.linalg.inv(self.S)), v)
  log.debug("Validation gate value for landmark %d: %s." % (self.id, value))

  if value <= Landmark.lamda:
    return True
  return False
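# The gate above is a Mahalanobis-distance test: with innovation v = z - h and
# innovation covariance S, the association is accepted when
#
#   v^T * S^-1 * v <= lambda
#
# where lambda (spelled "lamda" in the class attribute) is typically taken from
# a chi-square distribution with two degrees of freedom, one per measurement
# component (range and bearing).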
def rectangle_dimensions(l1, l2, l3, l4):
  p1 = Line.find_intersection(l1, l3)
  p2 = Line.find_intersection(l2, l4)
  p3 = Line.find_intersection(l1, l4)
  p4 = Line.find_intersection(l2, l3)
  log.debug("Rectangle points: %s, %s, %s, %s." % (p1, p2, p3, p4))

  # p1 and p2 are diagonally opposite.
  w = utilities.distance(p1, p3)
  l = utilities.distance(p2, p3)
  log.debug("Length, Width: %f, %f." % (l, w))

  return ((p1, p2, p3, p4), (w, l))
def __measurement_model(self, landmark):
  l_x = landmark.last_location[0]
  l_y = landmark.last_location[1]
  x = self.X[0]
  y = self.X[1]
  theta = self.X[2]

  # Use 0.5 rather than (1 / 2), which truncates to zero under integer division.
  distance = ((l_x - x) ** 2 + (l_y - y) ** 2) ** 0.5
  bearing = theta - math.atan((l_y - y) / (l_x - x))
  log.debug("Landmark %d: Range %f, Bearing %f." % \
      (landmark.id, distance, bearing))

  return np.vstack((distance, bearing))
def __measurement_model(self, landmark):
  # For the landmark position, we use what we stored in the state vector from
  # the last iteration.
  l_x = self.X[landmark.state_index]
  l_y = self.X[landmark.state_index + 1]
  x = self.X[0]
  y = self.X[1]
  theta = self.X[2]

  # Use 0.5 rather than (1 / 2), which truncates to zero under integer division.
  distance = ((l_x - x) ** 2 + (l_y - y) ** 2) ** 0.5
  bearing = theta - math.atan((l_y - y) / (l_x - x))
  log.debug("Landmark %d: Range %f, Bearing %f." % \
      (landmark.id, distance, bearing))

  return np.vstack((distance, bearing))
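# Both versions of the measurement model above predict the same range and
# bearing from the robot pose (x, y, theta) to a landmark at (l_x, l_y):
#
#   range   = sqrt((l_x - x)^2 + (l_y - y)^2)
#   bearing = theta - atan((l_y - y) / (l_x - x))
#
# Note that plain atan (rather than atan2) only resolves the bearing over half
# a circle and divides by zero when l_x == x; that is a limitation of the model
# as written.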
def __init__(self, start_x, start_y, start_theta):
  self.lds = sensors.LDS()
  self.wheels = motors.Wheels()

  # Start with a displacement of zero, and assume we are aligned with a wall.
  self.x_pos = start_x
  self.y_pos = start_y
  self.bearing = start_theta

  # Temporary wheel positions to save so we can figure out how far we've gone
  # between iterations.
  l_pos, r_pos = self.wheels.get_distance()
  log.debug("Starting wheel positions: L: %d, R: %d." % (l_pos, r_pos))
  self.last_l_wheel = l_pos
  self.last_r_wheel = r_pos

  # The time we last got a good position.
  self.last_position_time = 0

  self.kalman = Kalman(self.x_pos, self.y_pos, self.bearing)
def convex_hull(scan):
  hull = ConvexHull(scan)

  # Walk the hull edges and collect their points in order.
  points = []
  hull.vertices.pop(len(hull.vertices) // 2)
  for vertex in hull.vertices:
    points.append(scan[vertex[0]])
  points.append(scan[hull.vertices[-1][1]])

  # Sometimes we get duplicate points.
  seen = []
  for point in points:
    if point not in seen:
      seen.append(point)

  log.debug("Convex hull: %s." % (seen))
  return seen
def remove_outliers(scan):
  # Compute some statistics for our scan, using the distance component of each
  # reading.
  distances = [item[1][0] for item in scan.items()]
  mean = np.mean(distances)
  standard_deviation = np.std(distances)
  log.debug("Mean: %d." % (mean))
  log.debug("Standard Deviation: %d." % (standard_deviation))

  # Remove angles that are outliers.
  ret = {}
  last_error = 0
  for angle in range(0, 360):
    if angle in scan.keys():
      distance = scan[angle][0]

      if abs(distance - mean) > standard_deviation:
        if angle - last_error > 2:
          # We're far enough from an error spot that this is probably okay.
          ret[angle] = scan[angle]
        else:
          log.debug("Removing angle %d with value of %d." % (angle, distance))
      else:
        ret[angle] = scan[angle]
    else:
      last_error = angle

  return ret
def find_walls(blobs):
  # The minimum number of points that must lie close to a line for it to be
  # called a wall.
  consensus = 10
  # The maximum distance in mm a point can be from the line
  # before it doesn't count as part of the wall.
  max_distance = 100

  walls = []
  for blob in blobs:
    if len(blob.points) < consensus:
      continue

    slope, intercept = blob.fit_line()

    wall_points = []
    total_distance = 0
    for point in blob.points:
      # Find the distance between the line of best fit and each point.
      distance = utilities.line_distance(point, slope, intercept)
      if distance <= max_distance:
        wall_points.append(point)
        total_distance += distance

    # Now see if we still have at least ten points.
    if len(wall_points) < consensus:
      continue

    mean_distance = total_distance / len(wall_points)
    # Quality is a rough measure of how good of a landmark this wall is.
    quality = 2 * len(wall_points) - mean_distance
    log.debug("Found wall with points: %s." % (wall_points))
    log.debug("Wall quality: %f." % (quality))

    walls.append((slope, intercept, quality))

  return walls
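# As a rough illustration of the quality metric above: a wall with 20 inlier
# points at a mean distance of 15 mm from the fitted line scores
# 2 * 20 - 15 = 25, while one with 10 points at a mean distance of 40 mm scores
# 2 * 10 - 40 = -20.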
def check_landmark(self, location):
  # Check distance between this and all the known landmarks.
  best_landmark = None
  best_distance = sys.maxint
  for landmark in self.landmarks + self.new_landmarks:
    # Find the landmark that it's closest to.
    distance = utilities.distance(landmark.last_location, location)
    if distance < best_distance:
      best_distance = distance
      best_landmark = landmark

  if (best_landmark and best_landmark.validation_gate(location)):
    log.debug("Landmark at %s corresponds to landmark at %s." % \
        (location, best_landmark.last_location))

    best_landmark.sightings += 1
    best_landmark.last_location = location
  else:
    log.debug("Found a new landmark at %s." % (str(location)))

    landmark = Landmark()
    landmark.last_location = location
    self.new_this_cycle.append(landmark)
def __enhance_with_lidar(self, dx, dy, dtheta):
  scan = self.lds.get_scan(stale_time = 0)
  scan = filters.remove_outliers(scan)
  points = utilities.to_rectangular(scan)

  landmarks = self.__find_landmarks(points)
  log.debug("Found landmarks at: %s." % (landmarks))

  # Run the kalman filter.
  self.x_pos, self.y_pos, self.bearing = \
      self.kalman.run_iteration(landmarks, dx, dy, dtheta)
  log.debug("Corrected position: (%f, %f)." % (self.x_pos, self.y_pos))
  log.debug("Corrected bearing: %f." % (self.bearing))
def __update(self, position, timestamp):
  if timestamp < self.last_position_time:
    # It's possible to get a position from the past.
    log.warning("Got position that was before the last one.")
    return
  if position == (self.last_l_wheel, self.last_r_wheel):
    # Don't do anything if the odometry doesn't think we've moved.
    log.warning("Got same position.")
    self.last_position_time = timestamp
    return

  self.last_position_time = timestamp
  old_bearing = self.bearing

  # Figure out how far we drove.
  distance_l = position[0] - self.last_l_wheel
  distance_r = position[1] - self.last_r_wheel
  self.last_l_wheel = position[0]
  self.last_r_wheel = position[1]
  log.debug("Left distance: %d, Right distance: %d." % (distance_l, distance_r))

  circumference = robot_status.ROBOT_WIDTH * math.pi

  dtheta = 0
  if distance_l * distance_r < 0:
    # One wheel went forward and one went backward.
    left_over = abs(distance_l) - abs(distance_r)
    # Have the code below handle any extra distance.
    if left_over > 0:
      # Compute the change in bearing, using the wheel that traveled the shorter
      # distance.
      dtheta = distance_r / circumference * 2 * math.pi
      # Get the sign right. (A negative right distance means we went
      # clockwise, which decreases our bearing, etc.)
      dtheta = abs(dtheta) * (distance_r / abs(distance_r))

      distance_l = distance_l + distance_r
      distance_r = 0
    elif left_over < 0:
      dtheta = distance_l / circumference * 2 * math.pi
      dtheta = abs(dtheta) * (distance_r / abs(distance_r))

      distance_r = distance_r + distance_l
      distance_l = 0
    else:
      # It doesn't really matter which one we pick in this case.
      dtheta = distance_l / circumference * 2 * math.pi
      dtheta = abs(dtheta) * (distance_r / abs(distance_r))

      distance_r = 0
      distance_l = 0

  # Calculate change in position and bearing. (Both wheels always move at
  # the same speed.)
  difference = distance_r - distance_l
  # Now our circumference is doubled, because we're only rotating with one
  # wheel.
  circumference *= 2
  turn_angle = difference / circumference * 2 * math.pi
  dtheta += turn_angle

  # Since we're not rotating around the center of the robot, the x and y
  # positions also get changed.
  r = robot_status.ROBOT_WIDTH
  dx = r * math.cos(turn_angle) - r
  dy = r * math.sin(turn_angle)
  if turn_angle:
    # Cos remains positive regardless of the angle, so we need to negate it if
    # we're going in the other direction.
    dx *= turn_angle / abs(turn_angle)

  # Calculate change in position from driving straight.
  straight_component = min(abs(distance_r), abs(distance_l))
  # Get the sign right.
  if straight_component:
    if straight_component == abs(distance_r):
      straight_component *= distance_r / straight_component
    else:
      straight_component *= distance_l / straight_component

  dx += straight_component * math.cos(old_bearing)
  dy += straight_component * math.sin(old_bearing)

  self.x_pos += dx
  self.y_pos += dy
  self.bearing += dtheta

  # Normalize bearing.
  if self.bearing:
    self.bearing = self.bearing % \
        (2 * math.pi * (self.bearing / abs(self.bearing)))
  if self.bearing < 0:
    self.bearing = 2 * math.pi - abs(self.bearing)

  log.debug("New position: (%f, %f)." % (self.x_pos, self.y_pos))
  log.debug("New bearing: %f." % (math.degrees(self.bearing)))

  # Now that the robot has moved, use laser data and the Kalman filter to
  # improve our odometry data.
  self.__enhance_with_lidar(dx, dy, dtheta)
def incorporate_new(self, dx, dy, dtheta):
  log.debug("Running SLAM new landmark incorporation step...")

  landmarks = self.landmark_db.get_new_landmarks()

  next_index = self.X.shape[0]
  next_row = self.P.shape[0]
  next_column = self.P.shape[1]
  log.debug("Index of next landmark in X: %d." % (next_index))
  log.debug("Next row, column in P: %d, %d." % (next_row, next_column))

  # Calculate SLAM-specific jacobians.
  J_xr, J_z = self.__slam_jacobians(self.A, dx, dy, dtheta)
  log.debug("J_xr: %s\n, J_z: %s." % (J_xr, J_z))

  # Extend the state vector to contain all the new landmarks.
  rows_needed = len(landmarks) * 2
  padding = np.zeros((rows_needed, 1))
  self.X = np.vstack((self.X, padding))

  # Extend the covariance matrix to contain all the new landmarks.
  column_padding = np.zeros((self.P.shape[0], rows_needed))
  self.P = np.hstack((self.P, column_padding))
  row_padding = np.zeros((rows_needed, self.P.shape[1]))
  self.P = np.vstack((self.P, row_padding))

  for landmark in landmarks:
    # Add it to the state vector.
    self.X[next_index] = landmark.last_location[0]
    self.X[next_index + 1] = landmark.last_location[1]
    next_index += 2

    # Add it to the covariance matrix.
    R = self.__measurement_noise(landmark.z[0])
    # NOTE: The MIT paper has a mistake on this one, they use the whole P
    # matrix instead of just the submatrix. Not only is this mathematically
    # impossible, if you go and look at the sources you will see that it is
    # actually just supposed to be the upper left 3x3 submatrix.
    covariance = np.dot(np.dot(J_xr, self.P[0:3, 0:3]), J_xr.T) + \
        np.dot(np.dot(J_z, R), J_z.T)
    log.debug("Covariance of landmark %d: %s." % (landmark.id, covariance))
    self.P[next_row:(next_row + 2), next_column:(next_column + 2)] = covariance

    # Covariance between robot and landmark.
    robot_landmark_cov = np.dot(self.P[0:3, 0:3], J_xr.T)
    log.debug("Covariance between robot and landmark %d: %s." %
        (landmark.id, robot_landmark_cov))
    # NOTE: Again, the MIT paper is incorrect here. In the section defining
    # the covariance matrix, the definitions for submatrices "D" and "E"
    # should be switched.
    self.P[0:3, next_column:(next_column + 2)] = robot_landmark_cov
    # Covariance between landmark and robot.
    self.P[next_row:(next_row + 2), 0:3] = robot_landmark_cov.T

    # TODO (danielp): There is an issue somewhere again, because in both
    # papers that I looked at, the math doesn't work out. I'll therefore have
    # to try both ways that it could be.
    # Covariance between landmark and each other landmark.
    row = next_row
    column = 3
    diag_row = 3
    diag_column = next_column
    while column < next_column:
      # In the bottom two rows.
      landmark_landmark = np.dot(J_xr, self.P[0:3, column:(column + 2)])
      self.P[row:(row + 2), column:(column + 2)] = landmark_landmark
      # The diagonal one.
      self.P[diag_row:(diag_row + 2), diag_column:(diag_column + 2)] = \
          landmark_landmark.T

      column += 2
      diag_row += 2

    next_row += 2
    next_column += 2

  log.debug("New covariance matrix: %s." % (self.P))
def find_blobs(points):
  log.debug("Starting blob finder...")

  # How close points have to be to be in the same blob.
  blob_threshold = 300

  # Sort points by x coordinates.
  sort = sorted(points, key=itemgetter(0))
  # Add "used" flag to list.
  x_order = []
  for item in sort:
    x_order.append([item, False])

  # For each point, find all the ones that are close to it.
  blobs = []
  for index in range(0, len(x_order)):
    point = x_order[index][0]

    # Find a subset of the points that are possibly part of this blob.
    possible = []
    # First do it by x's.
    radius = 1
    none_lower = False
    none_higher = False
    while (not (none_lower and none_higher)):
      if index - radius < 0:
        none_lower = True
      if index + radius >= len(x_order):
        none_higher = True

      if not none_lower:
        lower = x_order[index - radius]
        value = lower[0]
        used = lower[1]

        if point[0] - value[0] < blob_threshold:
          if not used:
            possible.append(index - radius)
        else:
          none_lower = True
      if not none_higher:
        higher = x_order[index + radius]
        value = higher[0]
        used = higher[1]

        if value[0] - point[0] < blob_threshold:
          if not used:
            possible.append(index + radius)
        else:
          none_higher = True

      radius += 1

    # Now see if any of them work with the y value as well.
    to_delete = []
    for candidate_index in possible:
      candidate = x_order[candidate_index][0]

      if abs(point[1] - candidate[1]) > blob_threshold:
        # This point can't work.
        to_delete.append(candidate_index)
    for delete in to_delete:
      possible.remove(delete)

    # And now we have points that it is worth actually checking the distance on.
    blob = Blob.find_blob(point, blobs)
    for candidate_index in possible:
      candidate = x_order[candidate_index][0]

      if utilities.distance(point, candidate) < blob_threshold:
        # This is part of the blob.
        if blob:
          # Add candidate to our existing blob.
          blob.add_point(candidate)
        else:
          # Make a new blob containing both points, and keep adding to it.
          blob = Blob([point, candidate])
          blobs.append(blob)

        # We'll get to each candidate later as the point we're checking, so we can
        # flag it as used.
        x_order[candidate_index][1] = True

    # We can also get rid of the original point, because we've already found
    # everything close to it.
    x_order[index][1] = True

  for blob in blobs:
    log.debug("Found blob: %s." % (blob))

  return blobs
def ombb(points):
  hull = convex_hull(points)
  Line.hull = hull

  # Find extreme points.
  x_max = -sys.maxint
  x_min = sys.maxint
  y_max = -sys.maxint
  y_min = sys.maxint
  top_point = None
  bottom_point = None
  left_point = None
  right_point = None
  for i in range(0, len(hull)):
    point = hull[i]

    if point[0] > x_max:
      x_max = point[0]
      right_point = i
    if point[0] < x_min:
      x_min = point[0]
      left_point = i
    if point[1] > y_max:
      y_max = point[1]
      top_point = i
    if point[1] < y_min:
      y_min = point[1]
      bottom_point = i

  log.debug("x_min, x_max, y_min, y_max: %d, %d, %d, %d." % \
      (x_min, x_max, y_min, y_max))
  log.debug("Ext. point indices: (L, R, T, B) %d, %d, %d, %d." % \
      (left_point, right_point, top_point, bottom_point))

  top = Line((x_max, y_max), (x_min, y_max), top_point)
  bottom = Line((x_min, y_min), (x_max, y_min), bottom_point)
  left = Line((x_min, y_max), (x_min, y_min), left_point)
  right = Line((x_max, y_min), (x_max, y_max), right_point)
  sides = [top, bottom, left, right]

  # Create lines for edges of hull.
  hull_edges = {}
  for i in range(0, len(hull)):
    p1 = hull[i]
    p2 = hull[(i + 1) % len(hull)]
    hull_edges[i] = Line(p1, p2, i)
  log.debug("Hull edges: %s." % (hull_edges))

  # Run rotating caliper algorithm.
  best_area = sys.maxint
  best_points = ()
  best_dimensions = ()
  for i in range(0, len(hull)):
    # Find the side that makes the smallest angle with its hull edge.
    smallest_angle = sys.maxint
    use_side = None
    for side in sides:
      angle = Line.find_angle_between(side, hull_edges[side.index])
      log.debug("Angle between lines: %f." % (angle))

      if angle < smallest_angle:
        smallest_angle = angle
        use_side = side

    # Rotate.
    use_side.slope = hull_edges[use_side.index].slope
    use_side.index = (use_side.index + 1) % len(hull)
    log.debug("New slope: %s." % (str(use_side.slope)))
    log.debug("New index: %f." % (use_side.index))

    index = sides.index(use_side)
    if (index == 0 or index == 2):
      parallel = index + 1
    elif (index == 1 or index == 3):
      parallel = index - 1
    orthogonal = range(0, 4)
    orthogonal.remove(index)
    orthogonal.remove(parallel)
    log.debug("Side indices: Main: %d, ||: %d, +: %d, %d." % \
        (index, parallel, orthogonal[0], orthogonal[1]))

    sides[parallel].slope = use_side.slope
    for side_index in orthogonal:
      sides[side_index].orthogonal(use_side)

    # The points should be top left, bottom right, top right, and bottom left.
    points, dimensions = rectangle_dimensions(*sides)
    area = dimensions[0] * dimensions[1]
    log.debug("Area: %f." % (area))

    if area < best_area:
      best_area = area
      best_points = points
      best_dimensions = dimensions

  log.debug("Best area: %f\n, Best points: %s\n, Best_dimensions: %s" % \
      (best_area, best_points, best_dimensions))
  return best_points, best_dimensions
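# A minimal usage sketch for the rotating-calipers routine above, with made-up
# input points (the real input is the rectangular point list built from an LDS
# scan). It returns the four corners of the minimum-area oriented bounding box
# along with its (width, length):
#
#   corners, (w, l) = ombb([(0, 0), (1000, 0), (1000, 500), (0, 500), (500, 250)])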
def landmark_update(self):
  log.debug("Running SLAM landmark update step...")

  # Get all the landmarks from the database.
  landmarks = self.landmark_db.get_all_landmarks()

  # Update landmark range and bearing.
  for i in range(0, len(landmarks)):
    landmark = landmarks[i]

    # Use the measurement model.
    landmark.h = self.__measurement_model(landmark)

    # Compute measurement noise.
    R = self.__measurement_noise(landmark.z[0])
    log.debug("Measurement noise: %s." % (R))

    # Create the H matrix.
    H = np.zeros((2, len(landmarks) * 2 + 3))
    # Fill in the first part of the H matrix.
    jacobian = self.__measurement_jacobian(landmark)
    H[0:2, 0:3] = jacobian
    # Set the landmark-specific part. (Each landmark occupies two columns,
    # so landmark i starts at column 2 * i + 3.)
    H[0:2, (2 * i + 3):(2 * i + 5)] = jacobian[0:2, 0:2] * -1
    log.debug("H matrix for landmark %d: %s." % (landmark.id, H))

    # Now we can compute the innovation covariance.
    V = np.identity(2)
    landmark.S = np.dot(np.dot(H, self.P), H.T) + np.dot(np.dot(V, R), V.T)
    log.debug("Innovation covariance for landmark %d: %s." %
        (landmark.id, landmark.S))

    # Only actually adjust the state if the landmark is usable.
    if landmark.usable():
      K = np.dot(np.dot(self.P, H.T), np.linalg.inv(landmark.S))
      log.debug("Kalman gain for landmark %d: %s." % (landmark.id, K))

      # Compute a new state vector from the Kalman gain.
      innovation = landmark.z - landmark.h
      log.debug("Innovation for landmark %d: %s." % (landmark.id, innovation))
      self.X = self.X + np.dot(K, innovation)
      log.debug("Corrected state vector: %s." % (self.X))
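# For reference, the update above follows the usual EKF correction equations,
# applied once per usable landmark with measurement z and prediction h:
#
#   S = H * P * H^T + V * R * V^T    (innovation covariance)
#   K = P * H^T * S^-1               (Kalman gain)
#   X = X + K * (z - h)              (corrected state)
#
# H is 2 x (3 + 2n): its first three columns hold the measurement Jacobian with
# respect to the robot pose, and the two columns belonging to landmark i hold
# the negated position part of that Jacobian.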
def __init__(self, p1, p2, hullpoint_index):
  self.slope = (p2[0] - p1[0], p2[1] - p1[1])
  self.index = hullpoint_index

  log.debug("Line slope, index: %s, %d." % (self.slope, self.index))
def incorporate_new(self, dx, dy, dtheta):
  log.debug("Running SLAM new landmark incorporation step...")

  landmarks = self.landmark_db.get_new_landmarks()

  next_index = self.X.shape[0]
  next_row = self.P.shape[0]
  next_column = self.P.shape[1]
  log.debug("Index of next landmark in X: %d." % (next_index))
  log.debug("Next row, column in P: %d, %d." % (next_row, next_column))

  # Calculate SLAM-specific jacobians.
  J_xr, J_z = self.__slam_jacobians(self.A, dx, dy, dtheta)
  log.debug("J_xr: %s\n, J_z: %s." % (J_xr, J_z))

  # Extend the state vector to contain all the new landmarks.
  rows_needed = len(landmarks) * 2
  padding = np.zeros((rows_needed, 1))
  self.X = np.vstack((self.X, padding))

  # Extend the covariance matrix to contain all the new landmarks.
  column_padding = np.zeros((self.P.shape[0], rows_needed))
  self.P = np.hstack((self.P, column_padding))
  row_padding = np.zeros((rows_needed, self.P.shape[1]))
  self.P = np.vstack((self.P, row_padding))

  for landmark in landmarks:
    # Add it to the state vector.
    self.X[next_index] = landmark.last_location[0]
    self.X[next_index + 1] = landmark.last_location[1]
    landmark.state_index = next_index
    next_index += 2

    # Add it to the covariance matrix.
    R = self.__measurement_noise(landmark.z[0])
    # NOTE: The MIT paper has a mistake on this one, they use the whole P
    # matrix instead of just the submatrix. Not only is this mathematically
    # impossible, if you go and look at the sources you will see that it is
    # actually just supposed to be the upper left 3x3 submatrix.
    covariance = np.dot(np.dot(J_xr, self.P[0:3, 0:3]), J_xr.T) + \
        np.dot(np.dot(J_z, R), J_z.T)
    log.debug("Covariance of landmark %d: %s." % (landmark.id, covariance))
    self.P[next_row:(next_row + 2), next_column:(next_column + 2)] = covariance

    # Covariance between robot and landmark.
    robot_landmark_cov = np.dot(self.P[0:3, 0:3], J_xr.T)
    log.debug("Covariance between robot and landmark %d: %s." %
        (landmark.id, robot_landmark_cov))
    # NOTE: Again, the MIT paper is incorrect here. In the section defining
    # the covariance matrix, the definitions for submatrices "D" and "E"
    # should be switched.
    self.P[0:3, next_column:(next_column + 2)] = robot_landmark_cov
    # Covariance between landmark and robot.
    self.P[next_row:(next_row + 2), 0:3] = robot_landmark_cov.T

    # TODO (danielp): There is an issue somewhere again, because in both
    # papers that I looked at, the math doesn't work out. I have found other
    # sources which seem to indicate that the way I have it now is correct.
    # Covariance between landmark and each other landmark.
    row = next_row
    column = 3
    diag_row = 3
    diag_column = next_column
    while column < next_column:
      # In the bottom two rows.
      landmark_landmark = np.dot(J_xr, self.P[0:3, column:(column + 2)])
      self.P[row:(row + 2), column:(column + 2)] = landmark_landmark
      # The diagonal one.
      self.P[diag_row:(diag_row + 2), diag_column:(diag_column + 2)] = \
          landmark_landmark.T

      column += 2
      diag_row += 2

    next_row += 2
    next_column += 2

  log.debug("New covariance matrix: %s." % (self.P))
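# For reference, the covariance blocks added above for each new landmark L are:
#
#   P_LL = J_xr * P_rr * J_xr^T + J_z * R * J_z^T    (landmark covariance)
#   P_rL = P_rr * J_xr^T                             (robot / new landmark)
#   P_Lr = P_rL^T
#   P_LM = J_xr * P_rM                               (new landmark / existing landmark M)
#   P_ML = P_LM^T
#
# where P_rr is the upper-left 3x3 robot submatrix, J_xr is the Jacobian of the
# new landmark's position with respect to the robot pose, and J_z is its
# Jacobian with respect to the measurement.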
def stopped_driving(self, position, timestamp):
  log.debug("Wheel position at end of driving: %s." % (str(position)))

  self.__update(position, timestamp)
def update_position(self):
  position = self.wheels.get_distance(stale_time = 0)
  log.debug("Current wheel position: %s." % (str(position)))

  self.__update(position, time.time())