def getYs(self, sas, mas, X):
    """Predict sensor range readings for each sigma point in X.

    For every pose xi in X and every active sensor segment sas[j], the
    sensor segment is re-expressed in xi's frame (relative to the current
    estimate frame) and intersected with the map segment mas[j]; the
    predicted reading is the distance from xi to the intersection,
    saturated at MAX_RANGE.  Inactive sensors (sa is None) and misses
    (no intersection) report MAX_RANGE.

    Returns an (len(X), 3) float array of predicted readings.
    """
    MAX_RANGE = 15  # sensor saturation distance (same constant as original)
    estFrame = self.getFrame()
    ys = []
    for xi in X:
        yi = [0, 0, 0]
        for j, (sa, ma) in enumerate(zip(sas, mas)):
            if sa is None:
                yi[j] = MAX_RANGE
                continue
            # transform active sensor segment to this sigma point frame
            xFrame = matrix_utils.translation2d(xi[0], xi[1]) @ matrix_utils.rotation2d(xi[2])
            tf = xFrame @ np.linalg.inv(estFrame)
            sat1 = tf @ matrix_utils.toAffine(sa[0])
            sat2 = tf @ matrix_utils.toAffine(sa[1])
            ma1 = matrix_utils.toAffine(ma[0])
            ma2 = matrix_utils.toAffine(ma[1])
            # get intersection point with transformed segment
            intersect = seg_intersect(sat1, sat2, ma1, ma2)
            if intersect is None:
                # BUG FIX: original fell through here and then indexed
                # `intersect[0:2]` on None, raising TypeError.
                yi[j] = MAX_RANGE
                continue
            yi[j] = min(np.linalg.norm(intersect[0:2] - xi[0:2]), MAX_RANGE)
        ys.append(yi)
    return np.array(ys)
def getPoints(self):
    """Build the render points for all walls.

    Each wall w is laid out as a staircase of segment endpoints
    ([i, h, 1], [i+1, h, 1] per cell), mirrored, shifted, and rotated by
    a multiple of 90 degrees into its side of the arena, then uniformly
    scaled by 100/60 for display.  Returns an (N, 3) float array.
    """
    display_scale = matrix_utils.scale2d(100.0 / 60.0, 100.0 / 60.0)
    all_pts = []
    for w, wall in enumerate(self.walls):
        side_rot = matrix_utils.rotation2d(np.pi / 2 * (w + 1))
        tdif = (self.dim[0] - self.dim[1]) / 2
        # even-indexed walls get the extra dimension offset; all get +3
        shift = matrix_utils.translation2d(-len(wall) / 2, tdif * (1 - w % 2) + 3)
        mirror = matrix_utils.scale2d(-1, 1)
        wall_tf = side_rot @ mirror @ shift
        raw = []
        for i, height in enumerate(wall):
            raw.append([i, height, 1])
            raw.append([i + 1, height, 1])
        all_pts.extend(wall_tf @ np.array(p, dtype="float64") for p in raw)
    scaled = [np.array(display_scale @ np.array(p), dtype="float64") for p in all_pts]
    return np.array(scaled)
def getFrame(self):
    """Return self.state = (x, y, theta) as a 2D affine frame (translate @ rotate)."""
    trans = mu.translation2d(self.state[0], self.state[1])
    rot = mu.rotation2d(self.state[2])
    return trans @ rot
def getPoints(self):
    """Return `shape` warped by the covariance and placed at the estimate.

    The third row of the covariance copy is overwritten with [0, 0, 1] so it
    acts as a homogeneous 2D transform; points are transformed as row vectors.
    """
    cov_h = np.copy(self.P)
    cov_h[2] = np.array([0, 0, 1])  # force a valid homogeneous bottom row
    t = matrix_utils.translation2d(self.est[0], self.est[1])
    r = matrix_utils.rotation2d(self.est[2])
    transform = t @ cov_h @ r
    return (transform @ shape.T).T
def setPosition(self, pose):
    """Set self.frame from pose = (x, y, theta): translation composed with rotation."""
    trans = matrix_utils.translation2d(pose[0], pose[1])
    rot = matrix_utils.rotation2d(pose[2])
    self.frame = trans @ rot
def getPoints(self):
    """Return `shape` translated to this object's (bX, bY) position."""
    move = matrix_utils.translation2d(self.bX, self.bY)
    return matrix_utils.tfPoints(shape, move)
def __init__(self, x, y):
    """Place `bshape` at (x, y) and assign the fixed green color."""
    self.points = matrix_utils.tfPoints(bshape, matrix_utils.translation2d(x, y))
    self.color = (128, 255, 0)
def setState(self, x, y, t):
    """Pose the robot shape at (x, y) with heading t, drawn at 0.7 scale."""
    pose_tf = (matrix_utils.translation2d(x, y)
               @ matrix_utils.rotation2d(t)
               @ matrix_utils.scale2d(0.7, 0.7))
    self.points = matrix_utils.tfPoints(robot.robot_shape, pose_tf)
def __init__(self, x, y):
    """Feature at (x, y): record its position, render points, and a random class color."""
    trans = matrix_utils.translation2d(x, y)
    self.posn = np.array([x, y])
    self.points = matrix_utils.tfPoints(bshape, trans)
    # BUG FIX: np.random.random_integers was deprecated (NumPy 1.11) and later
    # removed.  randint's upper bound is exclusive, so randint(0, nClasses)
    # draws from the same inclusive range [0, nClasses - 1].
    self.colorClass = np.random.randint(0, nClasses)
    self.color = colors[self.colorClass]
def setOffscreen(self):
    """Park the camera marker at (30, 30), outside the visible area."""
    hide = matrix_utils.translation2d(30, 30)
    self.points = matrix_utils.tfPoints(cameraPtShape, hide)
def getFrame(self):
    """Affine frame of the averaged estimate: translate by (x, y), rotate by theta."""
    mean = self.avg()
    trans = matrix_utils.translation2d(mean[0], mean[1])
    return trans @ matrix_utils.rotation2d(mean[2])
def setLocation(self, d, th, cFrame):
    """Place the camera point at range d along bearing th, within frame cFrame."""
    radial = matrix_utils.rotation2d(th) @ matrix_utils.translation2d(d, 0)
    self.points = matrix_utils.tfPoints(cameraPtShape, cFrame @ radial)
def getFrame(x):
    """Affine frame for a pose vector x = (x, y, theta)."""
    trans = mu.translation2d(x[0], x[1])
    rot = mu.rotation2d(x[2])
    return trans @ rot
def getFrame(self):
    """Affine frame for the pose (self.x, self.y, self.t)."""
    rot = matrix_utils.rotation2d(self.t)
    return matrix_utils.translation2d(self.x, self.y) @ rot
def getPoints(self):
    """Return `shape` warped by the covariance and placed at the mean pose.

    The covariance copy's third row is overwritten with [0, 0, 1] so it can
    be used as a homogeneous 2D transform between translation and rotation.
    """
    cov_h = np.copy(self.cov)
    cov_h[2] = np.array([0, 0, 1])  # restore homogeneous bottom row
    pose_tf = (matrix_utils.translation2d(self.mu[0], self.mu[1])
               @ cov_h
               @ matrix_utils.rotation2d(self.mu[2]))
    return matrix_utils.tfPoints(shape, pose_tf)
def getPoints(self):
    """Return the feature shape warped by its covariance at the estimated position.

    BUG FIX: the original called np.zeros(1, 3) and np.zeros(3, 1), which pass
    3 / 1 as the *dtype* argument and raise TypeError — np.zeros takes the
    shape as a tuple.  The two zero blocks were also dimensionally
    inconsistent with each other, so the padding shapes are now derived from
    self.cov itself: covTf = [[cov, 0], [0, 1]].
    NOTE(review): for trans @ covTf to compose with the 3x3 affine `trans`,
    self.cov must be 2x2 — confirm against the caller.
    """
    trans = matrix_utils.translation2d(self.est[0], self.est[1])
    n = self.cov.shape[0]
    covTf = np.block([[self.cov, np.zeros((n, 1))],
                      [np.zeros((1, n)), np.ones((1, 1))]])
    return matrix_utils.tfPoints(featureShape, trans @ covTf)