def callback(self, msg):
    patrol_bot = PatrolBot()
    patrol_bot.name = self.veh
    #detect_msg = BoolStamped()
    #detect_msg.data = True
    #self.pub_detected.publish(detect_msg)
    # Load tag detections message
    for detection in msg.detections:
        # ------ start tag info processing
        new_info = TagInfo()
        new_info.id = int(detection.id)
        id_info = self.tags_dict[new_info.id]
        if detection.pose.pose.orientation.y <= 0.5:
            patrol_bot.direction = "cw"
        else:
            patrol_bot.direction = "ccw"
        patrol_bot.id = new_info.id
        print "detect id ", new_info.id
        '''if new_info.id == 1:
            patrol_bot.direction = "cw"
            patrol_bot.id = 1
            print "detect tag 1"
        elif new_info.id == 2:
            patrol_bot.direction = "cw"
            patrol_bot.id = 2
            print "detect tag 2"
        elif new_info.id == 3:
            patrol_bot.direction = "cw"
            patrol_bot.id = 3
            print "detect tag 3"
        elif new_info.id == 4:
            patrol_bot.direction = "cw"
            patrol_bot.id = 4
            print "detect tag 4"
        elif new_info.id == 5:
            patrol_bot.direction = "ccw"
            patrol_bot.id = 1
            print "detect tag 5"
        elif new_info.id == 6:
            patrol_bot.direction = "ccw"
            patrol_bot.id = 2
            print "detect tag 6"
        elif new_info.id == 7:
            patrol_bot.direction = "ccw"
            patrol_bot.id = 3
            print "detect tag 7"
        elif new_info.id == 8:
            patrol_bot.direction = "ccw"
            patrol_bot.id = 4
            print "detect tag 8"
        else:
            continue'''
        self.pub_info.publish(patrol_bot)
def callback(self, msg):
    patrol_bot = PatrolBot()
    patrol_bot.name = self.veh
    #detect_msg = BoolStamped()
    #detect_msg.data = True
    #self.pub_detected.publish(detect_msg)
    # Load tag detections message
    for detection in msg.detections:
        # ------ start tag info processing
        new_info = TagInfo()
        new_info.id = int(detection.id)
        id_info = self.tags_dict[new_info.id]
        if detection.pose.pose.orientation.y <= 0.5:
            patrol_bot.direction = "cw"
        else:
            patrol_bot.direction = "ccw"
        patrol_bot.id = new_info.id
        print "detect id ", new_info.id
        self.pub_info.publish(patrol_bot)
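# The two patrol callbacks above infer the travel direction from the y component
# of the detection quaternion. A minimal standalone sketch of that rule follows;
# the helper name is hypothetical, and the 0.5 threshold is taken from the code above.
def infer_patrol_direction(orientation, threshold=0.5):
    # Tags whose quaternion y component is at or below the threshold are
    # treated as clockwise; everything else as counter-clockwise.
    return "cw" if orientation.y <= threshold else "ccw"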
def callback(self, msg):
    detect_msg = BoolStamped()
    detect_msg.data = True
    self.pub_detected.publish(detect_msg)

    tag_infos = []
    # Load tag detections message
    for detection in msg.detections:
        # ------ start tag info processing
        new_info = TagInfo()
        new_info.id = int(detection.id)
        id_info = self.tags_dict[new_info.id]

        if new_info.id == 1:
            self.turnright = True
            self.turnleft = False
            self.turnaround = False
            #print "---------Tag 1---------Turn Right-----------"
        elif new_info.id == 2:
            self.turnright = False
            self.turnleft = True
            self.turnaround = False
            #print "---------Tag 2---------Turn LEFT-----------"
        elif new_info.id == 3:
            self.turnright = False
            self.turnleft = False
            self.turnaround = True
            print "---------Tag 3---------Turn around-----------"

        # Check yaml file to fill in ID-specific information
        new_info.tag_type = self.sign_types[id_info['tag_type']]
        if new_info.tag_type == self.info.S_NAME:
            new_info.street_name = id_info['street_name']
        elif new_info.tag_type == self.info.SIGN:
            new_info.traffic_sign_type = self.traffic_sign_types[id_info['traffic_sign_type']]
        elif new_info.tag_type == self.info.VEHICLE:
            new_info.vehicle_name = id_info['vehicle_name']

        # TODO: Implement location more than just a float like it is now.
        # location is now 0.0 if no location is set which is probably not that smart
        if self.loc == 226:
            l = id_info['location_226']
            if l is not None:
                new_info.location = l
        elif self.loc == 316:
            l = id_info['location_316']
            if l is not None:
                new_info.location = l

        tag_infos.append(new_info)
        # --- end tag info processing

        # Define the transforms
        veh_t_camxout = tr.translation_matrix((self.camera_x, self.camera_y, self.camera_z))
        veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180, 0, 'rxyz')
        veh_T_camxout = tr.concatenate_matrices(veh_t_camxout, veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

        camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2, 'rzyx')
        veh_T_camzout = tr.concatenate_matrices(veh_T_camxout, camxout_T_camzout)

        tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2, 'rxyz')

        # Load translation
        trans = detection.pose.pose.position
        rot = detection.pose.pose.orientation

        camzout_t_tagzout = tr.translation_matrix((trans.x * self.scale_x, trans.y * self.scale_y, trans.z * self.scale_z))
        camzout_R_tagzout = tr.quaternion_matrix((rot.x, rot.y, rot.z, rot.w))
        camzout_T_tagzout = tr.concatenate_matrices(camzout_t_tagzout, camzout_R_tagzout)

        veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout, camzout_T_tagzout, tagzout_T_tagxout)

        # Overwrite transformed value
        (trans.x, trans.y, trans.z) = tr.translation_from_matrix(veh_T_tagxout)
        (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)
        detection.pose.pose.position = trans
        detection.pose.pose.orientation = rot

    new_tag_data = AprilTagsWithInfos()
    new_tag_data.detections = msg.detections
    new_tag_data.infos = tag_infos
    # Publish Message
    self.pub_postPros.publish(new_tag_data)
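# The lookup id_info = self.tags_dict[new_info.id] used throughout these callbacks
# assumes a table keyed by integer tag ID. A sketch of its shape is below; only the
# key names come from the code above, while the IDs, type strings, and values are
# invented for illustration.
tags_dict = {
    1: {'tag_type': 'StreetName', 'street_name': 'EXAMPLE ST',
        'location_226': None, 'location_316': None},
    2: {'tag_type': 'TrafficSign', 'traffic_sign_type': 'stop',
        'location_226': 1.0, 'location_316': None},
    3: {'tag_type': 'Vehicle', 'vehicle_name': 'examplebot',
        'location_226': None, 'location_316': None},
}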
def callback(self, msg):
    tag_infos = []
    new_tag_data = AprilTagsWithInfos()

    # Load tag detections message
    for detection in msg.detections:
        # ------ start tag info processing
        new_info = TagInfo()
        # Can use id 1 as long as no bundles are used
        new_info.id = int(detection.id[0])
        id_info = self.tags_dict[new_info.id]

        # Check yaml file to fill in ID-specific information
        new_info.tag_type = self.sign_types[id_info['tag_type']]
        if new_info.tag_type == self.info.S_NAME:
            new_info.street_name = id_info['street_name']
        elif new_info.tag_type == self.info.SIGN:
            new_info.traffic_sign_type = self.traffic_sign_types[id_info['traffic_sign_type']]

            # publish for FSM
            # parking apriltag event
            msg_parking = BoolStamped()
            msg_parking.header.stamp = rospy.Time(0)
            if new_info.traffic_sign_type == TagInfo.PARKING:
                msg_parking.data = True
            else:
                msg_parking.data = False
            self.pub_postPros_parking.publish(msg_parking)

            # intersection apriltag event
            msg_intersection = BoolStamped()
            msg_intersection.header.stamp = rospy.Time(0)
            if (new_info.traffic_sign_type == TagInfo.FOUR_WAY or
                    new_info.traffic_sign_type == TagInfo.RIGHT_T_INTERSECT or
                    new_info.traffic_sign_type == TagInfo.LEFT_T_INTERSECT or
                    new_info.traffic_sign_type == TagInfo.T_INTERSECTION):
                msg_intersection.data = True
            else:
                msg_intersection.data = False
            self.pub_postPros_intersection.publish(msg_intersection)

        elif new_info.tag_type == self.info.VEHICLE:
            new_info.vehicle_name = id_info['vehicle_name']

        # TODO: Implement location more than just a float like it is now.
        # location is now 0.0 if no location is set which is probably not that smart
        if self.loc == 226:
            l = id_info['location_226']
            if l is not None:
                new_info.location = l
        elif self.loc == 316:
            l = id_info['location_316']
            if l is not None:
                new_info.location = l

        tag_infos.append(new_info)
        # --- end tag info processing

        # Define the transforms
        veh_t_camxout = tr.translation_matrix((self.camera_x, self.camera_y, self.camera_z))
        veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180, 0, 'rxyz')
        veh_T_camxout = tr.concatenate_matrices(veh_t_camxout, veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

        camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2, 'rzyx')
        veh_T_camzout = tr.concatenate_matrices(veh_T_camxout, camxout_T_camzout)

        tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2, 'rxyz')

        # Load translation
        trans = detection.pose.pose.pose.position
        rot = detection.pose.pose.pose.orientation
        header = detection.pose.header

        camzout_t_tagzout = tr.translation_matrix((trans.x * self.scale_x, trans.y * self.scale_y, trans.z * self.scale_z))
        camzout_R_tagzout = tr.quaternion_matrix((rot.x, rot.y, rot.z, rot.w))
        camzout_T_tagzout = tr.concatenate_matrices(camzout_t_tagzout, camzout_R_tagzout)

        veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout, camzout_T_tagzout, tagzout_T_tagxout)

        # Overwrite transformed value
        (trans.x, trans.y, trans.z) = tr.translation_from_matrix(veh_T_tagxout)
        (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

        new_tag_data.detections.append(
            AprilTagDetection(int(detection.id[0]),
                              float(detection.size[0]),
                              PoseStamped(header, Pose(trans, rot))))

    new_tag_data.infos = tag_infos
    # Publish Message
    self.pub_postPros.publish(new_tag_data)
def callback(self, msg):
    tag_infos = []
    # Load tag detections message
    for detection in msg.detections:
        # ------ start tag info processing
        new_info = TagInfo()
        new_info.id = int(detection.id)
        id_info = self.tags_dict[new_info.id]

        # Check yaml file to fill in ID-specific information
        new_info.tag_type = self.sign_types[id_info['tag_type']]
        if new_info.tag_type == self.info.S_NAME:
            new_info.street_name = id_info['street_name']
        elif new_info.tag_type == self.info.SIGN:
            new_info.traffic_sign_type = self.traffic_sign_types[id_info['traffic_sign_type']]
        elif new_info.tag_type == self.info.VEHICLE:
            new_info.vehicle_name = id_info['vehicle_name']

        # TODO: Implement location more than just a float like it is now.
        # location is now 0.0 if no location is set which is probably not that smart
        if self.loc == 226:
            l = id_info['location_226']
            if l is not None:
                new_info.location = l
        elif self.loc == 316:
            l = id_info['location_316']
            if l is not None:
                new_info.location = l

        tag_infos.append(new_info)
        # --- end tag info processing

        # Define the transforms
        veh_t_camxout = tr.translation_matrix((self.camera_x, self.camera_y, self.camera_z))
        veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180, 0, 'rxyz')
        veh_T_camxout = tr.concatenate_matrices(veh_t_camxout, veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

        camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2, 'rzyx')
        veh_T_camzout = tr.concatenate_matrices(veh_T_camxout, camxout_T_camzout)

        tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2, 'rxyz')

        # Load translation
        trans = detection.pose.pose.position
        rot = detection.pose.pose.orientation

        camzout_t_tagzout = tr.translation_matrix((trans.x * self.scale_x, trans.y * self.scale_y, trans.z * self.scale_z))
        camzout_R_tagzout = tr.quaternion_matrix((rot.x, rot.y, rot.z, rot.w))
        camzout_T_tagzout = tr.concatenate_matrices(camzout_t_tagzout, camzout_R_tagzout)

        veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout, camzout_T_tagzout, tagzout_T_tagxout)

        # Overwrite transformed value
        (trans.x, trans.y, trans.z) = tr.translation_from_matrix(veh_T_tagxout)
        (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)
        detection.pose.pose.position = trans
        detection.pose.pose.orientation = rot

    new_tag_data = AprilTagsWithInfos()
    new_tag_data.detections = msg.detections
    new_tag_data.infos = tag_infos
    # Publish Message
    self.pub_postPros.publish(new_tag_data)
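# The three callbacks above share the same homogeneous-transform chain:
# veh_T_tagxout = veh_T_camzout * camzout_T_tagzout * tagzout_T_tagxout.
# Below is a self-contained sketch of that chain using tf.transformations, with
# made-up mounting parameters and a fake detection half a metre in front of the
# camera; the numbers are illustrative, not the node's calibrated values.
import numpy as np
import tf.transformations as tr

camera_x, camera_y, camera_z = 0.065, 0.0, 0.11   # assumed mounting offsets [m]
camera_theta = 15.0                               # assumed camera pitch [deg]

veh_T_camxout = tr.concatenate_matrices(
    tr.translation_matrix((camera_x, camera_y, camera_z)),
    tr.euler_matrix(0, camera_theta * np.pi / 180, 0, 'rxyz'))
camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2, 'rzyx')
veh_T_camzout = tr.concatenate_matrices(veh_T_camxout, camxout_T_camzout)
tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2, 'rxyz')

# Fake detection: tag 0.5 m straight ahead along the camera z axis,
# facing the camera (identity orientation in the camera frame).
camzout_T_tagzout = tr.concatenate_matrices(
    tr.translation_matrix((0.0, 0.0, 0.5)),
    tr.quaternion_matrix((0.0, 0.0, 0.0, 1.0)))

veh_T_tagxout = tr.concatenate_matrices(
    veh_T_camzout, camzout_T_tagzout, tagzout_T_tagxout)
print(tr.translation_from_matrix(veh_T_tagxout))   # tag position in vehicle frame
print(tr.quaternion_from_matrix(veh_T_tagxout))    # tag orientation in vehicle frame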
def callback(self, msg):
    """ """
    # Load tag detections message
    tag_infos = []
    for detection in msg.detections:
        new_info = TagInfo()
        #new_location_info = Vector2D()
        new_info.id = int(detection.id)
        id_info = self.tags_dict[new_info.id]

        # Check yaml file to fill in ID-specific information
        new_info.tag_type = self.sign_types[id_info['tag_type']]
        if new_info.tag_type == self.info.S_NAME:
            new_info.street_name = id_info['street_name']
        elif new_info.tag_type == self.info.SIGN:
            new_info.traffic_sign_type = self.traffic_sign_types[id_info['traffic_sign_type']]
        elif new_info.tag_type == self.info.VEHICLE:
            new_info.vehicle_name = id_info['vehicle_name']

        # TODO: Implement location more than just a float like it is now.
        # location is now 0.0 if no location is set which is probably not that smart
        if self.loc == 226:
            l = id_info['location_226']
            if l is not None:
                new_info.location = l
        elif self.loc == 316:
            l = id_info['location_316']
            if l is not None:
                new_info.location = l

        #######################
        # Localization stuff
        #######################

        """
        camera_x     = 0.05  # x distance from wheel center
        camera_y     = 0.0   #
        camera_z     = 0.1   # height of camera from ground
        camera_theta = 15    # degrees of rotation around the y axis
        """
        camera_x = rospy.get_param("~camera_x")
        camera_y = rospy.get_param("~camera_y")
        camera_z = rospy.get_param("~camera_z")
        camera_theta = rospy.get_param("~camera_theta")

        # Load translation
        x = detection.transform.translation.x
        y = detection.transform.translation.y
        z = detection.transform.translation.z
        t_tc_Fc = k.Vector(x, y, z)  # translation of tag (t) w.r.t. camera (c) expressed in camera frame (Fc)

        # Load rotation
        x = detection.transform.rotation.x
        y = detection.transform.rotation.y
        z = detection.transform.rotation.z
        w = detection.transform.rotation.w
        e = k.Vector(x, y, z)
        Q_Ftag_Fold = k.Quaternion(e, w)

        # New tag orientation reference (zero when facing camera) w.r.t. old tag reference used by the lib
        C_Ft_Ftag = k.RotationMatrix(np.matrix([[0, 0, -1], [-1, 0, 0], [0, 1, 0]]))
        Q_Ft_Ftag = C_Ft_Ftag.toQuaternion()

        # Rotation of old reference frame used by the lib w.r.t. camera frame
        C_Fold_Fc = k.RotationMatrix(np.matrix([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
        Q_Fold_Fc = C_Fold_Fc.toQuaternion()

        # Camera localization
        t_cv_Fv = k.Vector(camera_x, camera_y, camera_z)  # translation of camera w.r.t. vehicle origin in vehicle frame
        C_Fc_Fv = k.euler2RotationMatrix(0, camera_theta, 0)  # rotation of camera frame w.r.t. vehicle frame
        Q_Fc_Fv = C_Fc_Fv.toQuaternion()

        # Compute tag orientation in vehicle frame
        Q_Ft_Fv = Q_Fc_Fv * Q_Fold_Fc * Q_Ftag_Fold * Q_Ft_Ftag

        # Compute position of tag in vehicle frame expressed in vehicle frame
        C_Fv_Fc = -C_Fc_Fv  # inverse transform
        t_tc_Fv = C_Fv_Fc * t_tc_Fc
        t_tv_Fv = t_tc_Fv + t_cv_Fv

        # Overwrite transformed value
        detection.transform.translation.x = t_tv_Fv.x
        detection.transform.translation.y = t_tv_Fv.y
        detection.transform.translation.z = t_tv_Fv.z
        detection.transform.rotation.x = Q_Ft_Fv.e.x
        detection.transform.rotation.y = Q_Ft_Fv.e.y
        detection.transform.rotation.z = Q_Ft_Fv.e.z
        detection.transform.rotation.w = Q_Ft_Fv.n

        # Debug print
        #A_read = Q_Ftag_Fold.toAngleAxis()
        #A_Ft_Fv = Q_Ft_Fv.toAngleAxis()
        #print 'Rotation Read'
        #A_read()
        #print 'Rotation in Vehicle Frame'
        #A_Ft_Fv()

        tag_infos.append(new_info)

    new_tag_data = AprilTags()
    new_tag_data.detections = msg.detections
    new_tag_data.infos = tag_infos
    # Publish Message
    self.pub_postPros.publish(new_tag_data)
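# The last callback relies on a custom kinematics module (k.Quaternion,
# k.RotationMatrix, k.euler2RotationMatrix). As a rough cross-check, the same
# orientation composition Q_Ft_Fv = Q_Fc_Fv * Q_Fold_Fc * Q_Ftag_Fold * Q_Ft_Ftag
# can be reproduced with tf.transformations, assuming (x, y, z, w) ordering and
# the usual Hamilton product; whether this matches the k library's conventions
# is an assumption, and the camera pitch and tag reading below are example values.
import numpy as np
import tf.transformations as tr

def quat_from_matrix3(C):
    # Embed a 3x3 rotation matrix into a 4x4 homogeneous matrix so that
    # tf.transformations.quaternion_from_matrix can consume it.
    M = np.eye(4)
    M[:3, :3] = C
    return tr.quaternion_from_matrix(M)

Q_Ft_Ftag = quat_from_matrix3(np.array([[0, 0, -1], [-1, 0, 0], [0, 1, 0]]))
Q_Fold_Fc = quat_from_matrix3(np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
Q_Fc_Fv = tr.quaternion_from_euler(0, 15.0 * np.pi / 180, 0)  # example camera pitch
Q_Ftag_Fold = np.array([0.0, 0.0, 0.0, 1.0])                  # example: tag read facing the camera

Q_Ft_Fv = tr.quaternion_multiply(
    Q_Fc_Fv, tr.quaternion_multiply(
        Q_Fold_Fc, tr.quaternion_multiply(Q_Ftag_Fold, Q_Ft_Ftag)))
print(Q_Ft_Fv)  # tag orientation in the vehicle frame as (x, y, z, w)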