def globalTscreen(w, h, pix_scale, pts_screen):
    """Take points in screen coordinates (2xn) and return points in global coordinates (2xn).

    w          width of the opencv window
    h          height of the opencv window
    pix_scale  pixels per unit of the global frame
    pts_screen 2xn matrix of screen-space points
    """
    # Invert the screen transform to map OpenCV display coords back to the local frame.
    inv_screen_xform = numpy.linalg.inv(create_screen_transform(w, h))
    homogeneous = ut.point_to_homo(pts_screen.copy())
    # Drop the homogeneous row, then undo the pixels-per-unit scaling.
    return scale(ut.homo_to_point(inv_screen_xform * homogeneous), 1.0 / pix_scale)
def screenTglobal(w, h, pix_scale, pts_global):
    """Map 2xn points in the player/stage (global) frame to OpenCV display coordinates.

    w          width of the opencv window
    h          height of the opencv window
    pix_scale  pixels per unit of pts_global
    pts_global 2xn matrix of points
    """
    # Scale units into pixels first, then apply the screen transform.
    pixel_pts = scale(pts_global.copy(), pix_scale)
    cv_t_l = create_screen_transform(w, h)
    return ut.homo_to_point(cv_t_l * ut.point_to_homo(pixel_pts))
def remember(self, local_point):
    """ Set egocentric points to keep track of """
    # local_point: node providing a 2D point in the robot's egocentric frame.
    self.local_point = local_point
    # Latest (-1) samples of the tracked point and the robot's pose.
    point = local_point.val(-1)
    start_pos = self.robot_pose_node.val(-1)
    # 2D rigid transform built from the robot's pose at remember time;
    # its inverse maps the egocentric point into the global frame.
    a_T_o = n.transform2D(start_pos.pos, start_pos.angle)
    # NOTE(review): stored in homogeneous form (no homo_to_point applied here,
    # unlike calc) -- confirm consumers of global_point expect 3x1 homogeneous.
    self.global_point = np.linalg.inv(a_T_o) * ut.point_to_homo(point)
    # Debug trace of what was remembered (Python 2 print statements).
    if (self.verbosity > 0):
        print "V_KeepLocal_P2d_V: (", self.name, ") point ", point.T
        print "V_KeepLocal_P2d_V: (", self.name, ") start_pos ", start_pos
        print "V_KeepLocal_P2d_V: (", self.name, ") global_point ", self.global_point.T
def calc(self, t):
    """Re-express the point from local_pose at time t in the frame given by global_pos at time t."""
    pose = self.global_pos.val(t)
    pt = self.local_pose.val(t)
    # Inverse of the 2D rigid transform for the pose at time t.
    g_T_l = np.linalg.inv(n.transform2D(pose.pos, pose.angle))
    transformed = g_T_l * ut.point_to_homo(pt)
    return ut.homo_to_point(transformed)
def predict(self, odometry, object_state):
    """ state of object in last local frame

    Advance the pose with the motion model given this odometry reading,
    then map object_state (2xn points) through the resulting 2D transform.
    """
    new_pose = self.motion.predict(odometry, Pose2D(0.0, 0.0, 0.0))
    # BUGFIX: was `t2d.tranform2d(new_pose.pos, angle)` -- misspelled helper
    # name and an undefined `angle`. Use transform2D with the predicted
    # pose's own angle, matching predict_partial_f.
    b_g_a = t2d.transform2D(new_pose.pos, new_pose.angle)
    return ut.homo_to_point(b_g_a * ut.point_to_homo(object_state))
def predict_partial_f(object_state):
    """Map object_state through the transform of the pose predicted from the origin."""
    predicted = rm(t2d.Pose2D(0.0, 0.0, 0.0))
    frame_xform = t2d.transform2D(predicted.pos, predicted.angle)
    homogeneous = ut.point_to_homo(object_state)
    return ut.homo_to_point(frame_xform * homogeneous)