def globalTscreen(w, h, pix_scale, pts_screen):
    """Map screen-coordinate points back into global coordinates.

    Inverts the screen transform for a w-by-h window, applies it to the
    points in homogeneous form, then divides out the pixels-per-unit
    scale factor.

    w          width of the display window
    h          height of the display window
    pix_scale  pixels per unit of the global frame
    pts_screen 2xn matrix of points in screen coordinates

    Returns a 2xn matrix of points in global coordinates.
    """
    # Invert the global->screen transform to get screen->global.
    cv_to_local = numpy.linalg.inv(create_screen_transform(w, h))
    # Work on a copy so the caller's matrix is left untouched.
    homogeneous = ut.point_to_homo(pts_screen.copy())
    unscaled = ut.homo_to_point(cv_to_local * homogeneous)
    return scale(unscaled, 1.0 / pix_scale)
def screenTglobal(w, h, pix_scale, pts_global):
    """Map points in the player/stage (global) frame to OpenCV display coordinates.

    w          width of the OpenCV window
    h          height of the OpenCV window
    pix_scale  pixels per unit of pts_global
    pts_global 2xn matrix of points in the global frame

    Returns a 2xn matrix of points in screen coordinates.
    """
    screen_transform = create_screen_transform(w, h)
    # Copy first so scaling does not mutate the caller's data.
    scaled_pts = scale(pts_global.copy(), pix_scale)
    homogeneous = ut.point_to_homo(scaled_pts)
    return ut.homo_to_point(screen_transform * homogeneous)
def calc(self, t):
    """Return point(s) expressed in the robot's current coordinate frame (frame b), i.e. bG.

    Given:  aG, a_T_o, b_T_o
    Calc:   bG
    With:   bG = (b_T_o) (o_T_a) aG

    t  timestamp used to query the robot's pose node.

    Returns the stored global point(s) transformed into the robot frame,
    as a 2xn point matrix.
    """
    # Lazily capture the point on first use.  `is None` (not `== None`):
    # if global_point is a numpy matrix, `== None` would do an elementwise
    # comparison instead of an identity test.
    if self.global_point is None:
        self.remember(self.local_point)
    current_pose = self.robot_pose_node.val(t)
    b_T_o = n.transform2D(current_pose.pos, current_pose.angle)
    # global_point is assumed to already be in homogeneous form
    # (set by remember) — TODO confirm against remember's implementation.
    return ut.homo_to_point(b_T_o * self.global_point)
def calc(self, t):
    """Express the local pose's point(s) in the frame of the global pose at time t.

    t  timestamp used to query both the global-position and local-pose nodes.

    Returns a 2xn point matrix: the local points mapped through the
    inverse of the global pose's 2D transform.
    """
    global_pose = self.global_pos.val(t)
    local_pts = self.local_pose.val(t)
    # Invert the 2D rigid transform built from the global pose.
    inverse_tf = np.linalg.inv(n.transform2D(global_pose.pos, global_pose.angle))
    homogeneous = ut.point_to_homo(local_pts)
    return ut.homo_to_point(inverse_tf * homogeneous)
def predict(self, odometry, object_state):
    """Predict the object's state in the new local frame after applying odometry.

    odometry      motion-model input describing the robot's movement.
    object_state  2xn point matrix: state of the object in the last local frame.

    Returns the object state transformed into the predicted frame.
    """
    # Predict the new pose relative to the identity pose.
    # t2d.Pose2D matches the usage elsewhere in this module (the bare
    # Pose2D name was likely an unresolved reference).
    new_pose = self.motion.predict(odometry, t2d.Pose2D(0.0, 0.0, 0.0))
    # Fixed: original called the misspelled t2d.tranform2d and referenced
    # an undefined bare name `angle` instead of new_pose.angle.
    b_g_a = t2d.transform2D(new_pose.pos, new_pose.angle)
    return ut.homo_to_point(b_g_a * ut.point_to_homo(object_state))
def predict_partial_f(object_state):
    """Transform object_state (2xn points) into the frame predicted by `rm`.

    Applies the captured motion function `rm` to the identity pose, builds
    the corresponding 2D transform, and maps the points through it.
    """
    predicted = rm(t2d.Pose2D(0.0, 0.0, 0.0))
    frame_tf = t2d.transform2D(predicted.pos, predicted.angle)
    homogeneous_pts = ut.point_to_homo(object_state)
    return ut.homo_to_point(frame_tf * homogeneous_pts)