def convert_marker_array_to_centers(marker_array):
    """
    Takes a marker array output by octomap_server and uses it to construct the
    array of uniform-distance cell centers.
    TODO: Replace with proper reading from the OctoMap
    :param marker_array: a MarkerArray message
    :return: (cell centers as an (N, 3) array, base resolution)
    """
    to_merge = []
    base_resolution = None
    # Markers are ordered from coarsest to finest; iterate finest-first.
    for marker in marker_array.markers[::-1]:
        if not marker.points:
            break
        if base_resolution is None:
            # The finest non-empty level defines the base resolution.
            base_resolution = marker.scale.x
            to_merge.append(np.array([numpify(p) for p in marker.points]))
        else:
            # Coarser cells should be an integer power of two times the base resolution.
            degree = np.log2(marker.scale.x / base_resolution)
            threshold = 0.00001
            if threshold < degree % 1 < 1 - threshold:
                break
            # Subdivide each coarse cell into a grid of base-resolution centers.
            cell_side = 2**np.round(degree)
            base_array = np.arange(cell_side)
            base_array -= base_array.mean()
            array = np.array(list(product(base_array, base_array, base_array))) * base_resolution
            for pt in marker.points:
                to_merge.append(array + numpify(pt))
    return np.concatenate(to_merge), base_resolution
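# To see what the subdivision step above produces in isolation, here is a
# minimal sketch with synthetic numbers; the 0.05 m base resolution and the
# coarse cell center are made-up values, not taken from the original code.
import numpy as np
from itertools import product

base_resolution = 0.05   # finest cell size in metres (assumed)
coarse_scale = 0.2       # one coarse cell, 4x the base resolution (assumed)
degree = np.log2(coarse_scale / base_resolution)   # -> 2.0
cell_side = int(2**np.round(degree))               # -> 4

# Offsets of the 4x4x4 sub-cell centers, centred on the coarse cell.
base_array = np.arange(cell_side, dtype=float)
base_array -= base_array.mean()                    # [-1.5, -0.5, 0.5, 1.5]
offsets = np.array(list(product(base_array, repeat=3))) * base_resolution

coarse_center = np.array([1.0, 2.0, 3.0])
centers = offsets + coarse_center                  # 64 base-resolution centers
print(centers.shape)                               # (64, 3)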
def execute(self, ud):
    while not rospy.is_shutdown():
        target = self.target()
        self.target_pub.publish(target)
        try:
            this_pose = PoseStamped()
            this_pose.header.frame_id = 'odom'
            this_pose.pose = self.odometry.value.pose.pose
            this_pose = self.tf_listener.transformPose('map', this_pose)
        except (tf.LookupException, tf.ExtrapolationException, tf.ConnectivityException):
            continue
        this_position = numpify(this_pose.pose.position)[0:2]
        target_position = numpify(target.pose.position)[0:2]
        if np.linalg.norm(this_position - target_position) < 0.01:
            # Close enough - stop the robot.
            self.twist_pub.publish(Twist())
            break
        target_angle = np.arctan2(target.pose.position.y - this_pose.pose.position.y,
                                  target.pose.position.x - this_pose.pose.position.x)
        this_angle, _, _ = transformations.rotation_from_matrix(
            transformations.quaternion_matrix(numpify(this_pose.pose.orientation)))
        t = Twist()
        t.linear.x = self.v
        t.angular.z = -4 * angle_diff(this_angle, target_angle)
        self.twist_pub.publish(t)
def _obstacle_callback(self, msg):
    try:
        transform = self.tf_buffer.lookup_transform(
            self.map_frame,
            msg.header.frame_id,
            msg.header.stamp,
            rospy.Duration(1.0))
    except (tf2.ConnectivityException, tf2.LookupException,
            tf2.ExtrapolationException) as e:
        rospy.logwarn(e)
        return
    for o in msg.obstacles:
        point = PointStamped()
        point.header = msg.header
        point.point = o.pose.pose.pose.position
        # Transform the robot into the map frame
        point = tf2_geometry_msgs.do_transform_point(point, transform)
        # Merge robots that are close together
        cleaned_robots = []
        for old_robot in self.robots:
            # Keep only remembered robots that are not close to the new one
            if np.linalg.norm(
                    ros_numpy.numpify(point.point) -
                    ros_numpy.numpify(old_robot.point)) > self.robot_merge_distance:
                cleaned_robots.append(old_robot)
        # Replace our robot list with a new list that does not contain the duplicates
        self.robots = cleaned_robots
        # Append our new robot
        self.robots.append(point)
def slam_callback(self, pose):
    # Homogeneous transformation matrix from the origin
    self.slam_pose_raw = ros_numpy.numpify(pose.pose)
    self.slam_pose_np = np.dot(self.slam_pose_correction, self.slam_pose_raw)
    self.slam_pose_np = self.slam_pose_np * self.scale_factor_matrix
    self.slam_pose = ros_numpy.msgify(Pose, self.slam_pose_np)
    # Bebop camera link correction and camera calibration correction
    ang_z = self.euler_from_pose(self.slam_pose)[2]
    self.slam_pose.position.x += -0.11 * np.cos(ang_z)
    self.slam_pose.position.y += -0.11 * np.sin(ang_z)
    self.slam_pose_np = ros_numpy.numpify(self.slam_pose)
    self.last_slam_time = time.time()
    if self.odom_pose is not None:
        # Calculates the transform matrix from the odom position to the
        # modified SLAM coordinate system (assumed to be the true value)
        self.odom_pose_correction = np.dot(
            np.linalg.inv(self.odom_pose_raw_np), self.slam_pose_np)
    else:
        rospy.loginfo("Haven't received any odom coord yet!")
    self.current_pose = self.slam_pose
    self.current_pose_np = self.slam_pose_np
def timer_callback(self, event):
    try:
        tf = self.tf_buffer.lookup_transform(
            target_frame=self.map_frame,
            source_frame=self.base_frame,
            time=event.current_real,
            timeout=rospy.Duration(0.05)
        ).transform
    except (tf2_ros.LookupException, tf2_ros.ExtrapolationException,
            tf2_ros.ConnectivityException):
        return
    p = PoseStamped(
        pose=Pose(
            position=tf.translation,
            orientation=tf.rotation
        )
    )
    pn = ros_numpy.numpify(p.pose)
    if np.isnan(pn).any():
        rospy.logwarn("Got a bad tf of {!r}".format(p.pose))
        return
    if self.path.poses:
        lastn = ros_numpy.numpify(self.path.poses[-1].pose)
        if np.linalg.norm(lastn[:, -1] - pn[:, -1]) < 0.05:
            return
    self.path.poses.append(p)
    self.pub_path.publish(self.path)
def get_laser_in_baselink(self):
    # Get local point cloud
    with self.laser_lock:
        pc = ros_numpy.numpify(self.laser_data).ravel()
        source_frame = self.laser_data.header.frame_id
    # Array of size (4, N_points). 4th coordinate is for homogeneous transformation
    pc_array = np.stack([pc[f] for f in ['x', 'y', 'z']] + [np.ones(pc.size)])
    pc_array_clean = pc_array[:, np.logical_not(np.any(np.isnan(pc_array), axis=0))]
    # Transform pc to baselink coordinates
    try:
        trans = self.tf_buffer.lookup_transform(
            self.config["base_link_frame_id"], source_frame,
            rospy.Time(0), rospy.Duration(0))
        trans = ros_numpy.numpify(trans.transform)
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException) as err:
        rospy.logwarn(
            "Transforming laser to base_link in virtual bumper failed: %s", err)
        return None
    # Get the zero-roll-pitch transform
    pc_baselink = np.matmul(trans, pc_array_clean)[:3, :]
    return pc_baselink
def merged_point_publish(self, point_center, point_left, point_right):
    print('start merging...')
    self.merging_flag = True
    self.start_time = rospy.Time.now().to_sec()
    point_left = do_transform_cloud(point_left, self.tf_left)
    point_right = do_transform_cloud(point_right, self.tf_right)
    point_center = numpify(point_center)
    point_left = numpify(point_left)
    point_right = numpify(point_right)
    point_set = np.append(np.append(point_center, point_left), point_right)
    # point_set = np.append(point_right, point_left)
    merged_points = msgify(PointCloud2, point_set)
    pub1.publish(merged_points)
    self.elapsed = rospy.Time.now().to_sec() - self.start_time
    pub2.publish(self.elapsed)
    print('Merged PointCloud data set is published')
    self.center_flag = False
    self.left_flag = False
    self.right_flag = False
    self.merging_flag = False
def get_pass_regions(self):
    """
    Draws a costmap for the pass regions
    """
    pass_dist = 1.0
    pass_weight = 20.0
    pass_smooth = 4.0

    # Init a new costmap
    costmap = np.zeros_like(self.costmap)

    # Iterate over possible team mate poses
    for pose in self._blackboard.team_data.get_active_teammate_poses(count_goalies=False):
        # Get positions
        goal_position = np.array([self.field_length / 2, 0, 0])  # position of the opponent goal
        teammate_position = ros_numpy.numpify(pose.position)
        # Get vector
        vector_teammate_to_goal = goal_position - teammate_position
        # Position between robot and goal but 1m away from the robot
        pass_pos = vector_teammate_to_goal / np.linalg.norm(vector_teammate_to_goal) \
            * pass_dist + teammate_position
        # Convert position to array index
        idx_x, idx_y = self.field_2_costmap_coord(pass_pos[0], pass_pos[1])
        # Draw pass position with smoothing independent weight on costmap
        costmap[idx_x, idx_y] = pass_weight * pass_smooth

    # Smooth obstacle map
    return gaussian_filter(costmap, pass_smooth)
def offset_goal(pose, offset):
    position = numpify(pose.pose.position)
    orientation = numpify(pose.pose.orientation)
    position += qv_mult(orientation, np.array([offset, 0.0, 0.0]))
    pose.pose.position = msgify(Point, position)
    pose.pose.orientation = msgify(Quaternion, orientation)
    return pose
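# offset_goal shifts a pose along its own x axis by rotating the offset vector
# with the pose's quaternion. The qv_mult helper is not shown in this snippet;
# the following is a conventional implementation with tf.transformations, given
# purely as an assumption about what that helper does.
import numpy as np
from tf import transformations

def qv_mult(q, v):
    """Rotate vector v by quaternion q (numpy arrays, q in x-y-z-w order)."""
    # Represent v as a pure quaternion, then compute q * v * q_conjugate.
    v_quat = np.append(v, 0.0)
    rotated = transformations.quaternion_multiply(
        transformations.quaternion_multiply(q, v_quat),
        transformations.quaternion_conjugate(q))
    return rotated[:3]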
def callback(msg1, msg2):
    # print(msg1.data)
    try:
        fname = str(time.time())
        cloud_tmp_1 = numpify(msg1)
        cloud_split_1 = split_rgb_field(cloud_tmp_1)
        np1 = np.stack([cloud_split_1[f] for f in ('x', 'y', 'z', 'r', 'g', 'b')]).T
        cloud_tmp_2 = numpify(msg2)
        cloud_split_2 = split_rgb_field(cloud_tmp_2)
        np2 = np.stack([cloud_split_2[f] for f in ('x', 'y', 'z', 'r', 'g', 'b')]).T
        print("msg1 header :", msg1.header)
        print("msg2 header :", msg2.header)
        np.savez("/home/finibo/ws1/data/" + fname, np1, np2)
        print("------------")
    except Exception:
        # Silently ignore malformed messages
        pass
def publish_once_from_queue(self):
    self.most_recent_goal = self.messages.pop() if len(self.messages) else self.most_recent_goal
    if self.most_recent_goal is not None:
        try:
            goal_frame = self.most_recent_goal.header.frame_id
            point = self.most_recent_goal.point
            trans = self.tfBuffer.lookup_transform(self.turtlebot_frame,
                                                   goal_frame, rospy.Time())
            rot = ros_numpy.numpify(trans.transform.rotation)
            rot = np.array(tf.transformations.quaternion_matrix(rot)[:3, :3])
            # Process trans to get your state error
            # Generate a control command to send to the robot
            msg = Twist()
            point = np.dot(rot, ros_numpy.numpify(point)) + \
                ros_numpy.numpify(trans.transform.translation)
            print(point)
            msg.linear.x = self.k1 * point[1]
            msg.angular.z = self.k2 * point[0]
            self.pub.publish(msg)
            # Use our rate object to sleep until it is time to publish again
            self.r.sleep()
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            print(e)
def execute(self, userdata):
    self.waitUntilTaskStart()
    if self.skip_adjust_place_position:
        return 'succeeded'
    if not self.do_channel_recognition:
        rospy.logwarn(self.__class__.__name__ + ': no recognition, skip')
        return 'succeeded'
    try:
        channel_trans = self.robot.getTF(self.channel_tf_frame_id,
                                         wait=self.recognition_wait)
        channel_trans = ros_numpy.numpify(channel_trans.transform)
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        rospy.logerr(self.__class__.__name__ + ": channel position detection failed")
        if userdata.search_count == 0:
            userdata.orig_global_trans = ros_numpy.numpify(
                self.robot.getBaselinkOdom().pose.pose)
        if userdata.search_failed:
            # init search state
            userdata.search_count = 0
            userdata.search_failed = False
            return 'succeeded'  # go to next state
        else:
            return 'failed'

    channel_pos = tft.translation_from_matrix(channel_trans)
    rospy.logwarn("%s: succeeded in finding channel x: %f, y: %f",
                  self.__class__.__name__, channel_pos[0], channel_pos[1])
    channel_pos_local = channel_pos - self.robot.getBaselinkPos()
    self.robot.goPosWaitConvergence(
        'global', channel_pos[0:2], self.robot.getTargetZ(),
        self.robot.getTargetYaw(), pos_conv_thresh=0.25,
        yaw_conv_thresh=0.1, vel_conv_thresh=0.1, timeout=15)

    # reset search state
    userdata.search_count = 0
    userdata.search_failed = False

    # check that the channel is directly below
    if np.linalg.norm(channel_pos_local[0:2]) > self.channel_pos_thresh:
        rospy.logwarn(
            "%s: succeeded in finding channel, but not directly below. diff: %f",
            self.__class__.__name__, np.linalg.norm(channel_pos_local[0:2]))
        return 'adjust_again'
    return 'succeeded'
def camera_callback(self, k1_img_msg, k2_img_msg, k1_camera_info_msg, k2_camera_info_msg):
    k1_img = ros_numpy.numpify(k1_img_msg)
    k2_img = ros_numpy.numpify(k2_img_msg)
    k1_intrinsic = np.array(k1_camera_info_msg.K).reshape((3, 3))
    k2_intrinsic = np.array(k2_camera_info_msg.K).reshape((3, 3))
    # self.feature_match(k1_img, k2_img, k1_intrinsic, k2_intrinsic)
    self.process_images(k1_img, k2_img, k1_intrinsic, k2_intrinsic)
def encode_segment_image(data, controller):
    return {
        'class_segment_img': ros_numpy.numpify(data.class_segment_img),
        'instance_segment_img': ros_numpy.numpify(data.instance_segment_img),
        'class_ids': {
            class_name: class_id
            for (class_name, class_id) in zip(data.class_names, data.class_ids)
        }
    }
def closest_square(position, squares):
    # type: (Point, List[ParkingSquare]) -> ParkingSquare
    differences = [
        np.linalg.norm(numpify(position)[:2] - numpify(square.pose.pose.position)[:2])
        for square in squares
    ]
    return squares[np.argmin(differences)]
def position(self):
    # type: () -> Optional[PointStamped]
    if self._left_post.pose is None or self._right_post.pose is None:
        return None
    assert self._left_post.position.header.frame_id == self._right_post.position.header.frame_id
    position = PointStamped()
    position.header.frame_id = self._left_post.pose.header.frame_id
    left_position = numpify(self._left_post.position.point)
    right_position = numpify(self._right_post.position.point)
    # The goal center is the midpoint between the two posts.
    position.point = msgify(Point, (left_position + right_position) / 2)
    return position
def get_rope_point_positions(self):
    # NOTE: consider getting rid of this message type/service and just using
    # rope state [0] and rope state [-1], although that loses semantic meaning
    # and means hard-coding indices a lot...
    left_res: GetPosition3DResponse = self.pos3d.get(
        scoped_link_name=gz_scope(self.ROPE_NAMESPACE, 'left_gripper'))
    left_rope_point_position = ros_numpy.numpify(left_res.pos)
    right_res: GetPosition3DResponse = self.pos3d.get(
        scoped_link_name=gz_scope(self.ROPE_NAMESPACE, 'right_gripper'))
    right_rope_point_position = ros_numpy.numpify(right_res.pos)
    return left_rope_point_position, right_rope_point_position
def offset_goal():
    pose = PoseStamped()
    pose.header.frame_id = 'map'
    goal_pose = goal()  # type: PoseStamped
    position = numpify(goal_pose.pose.position)
    orientation = numpify(goal_pose.pose.orientation)
    position += qv_mult(orientation, np.array([-offset, 0.0, 0.0]))
    pose.pose.position = msgify(Point, position)
    pose.pose.orientation = msgify(Quaternion, orientation)
    return pose
def callback(self, points_msg, image, info):
    try:
        intrinsic_matrix = get_camera_matrix(info)
        rgb_image = ros_numpy.numpify(image)
        points = ros_numpy.numpify(points_msg)
    except Exception as e:
        rospy.logerr(e)
        return
    self.num_steps += 1
    self.messages.appendleft((points, rgb_image, intrinsic_matrix))
def transform_poses(self, poses, transform):
    transform_np = ros_numpy.numpify(transform.transform)
    transformed_poses = [0] * len(poses)
    for i, pose in enumerate(poses):
        pose_np = ros_numpy.numpify(pose)
        transformed_poses[i] = np.matmul(transform_np, pose_np)
    return transformed_poses
def transform_poses(poses, transform):
    transform_np = ros_numpy.numpify(transform.transform)
    transformed_poses = [0] * len(poses)
    for i, pose in enumerate(poses):
        pose_np = ros_numpy.numpify(pose)
        tf_pose = np.matmul(transform_np, pose_np)
        transformed_poses[i] = ros_numpy.msgify(Pose, tf_pose)
    return transformed_poses
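# A minimal usage sketch for transform_poses, runnable without a ROS master
# (assuming ros_numpy and tf are installed); the 90-degree rotation and the
# test pose are made-up values.
import numpy as np
import ros_numpy
from tf import transformations
from geometry_msgs.msg import Pose, TransformStamped

# Build a TransformStamped that rotates 90 degrees about z and shifts +1 in x.
t = TransformStamped()
t.transform.translation.x = 1.0
q = transformations.quaternion_from_euler(0, 0, np.pi / 2)
(t.transform.rotation.x, t.transform.rotation.y,
 t.transform.rotation.z, t.transform.rotation.w) = q

# A pose at (1, 0, 0) with identity orientation.
pose = Pose()
pose.position.x = 1.0
pose.orientation.w = 1.0

result = transform_poses([pose], t)
print(result[0].position)  # ~ (1, 1, 0): rotated onto +y, then shifted +1 in x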
def on_enter(self, userdata):
    post = posts[userdata.leg_number]
    if not post.has_seen():
        self._error = True
        return
    target_pose = PoseStamped()
    point = numpify(post.position.point)
    normal = numpify(post.normal.vector)
    target_pose.header.frame_id = post.pose.header.frame_id
    target_pose.pose.position = msgify(Point, point + normal * 2)
    target_pose.pose.orientation = msgify(Quaternion, normal_to_quaternion(-normal))
    userdata.target_pose = target_pose
def test_point(self):
    p = Point(1, 2, 3)

    p_arr = ros_numpy.numpify(p)
    np.testing.assert_array_equal(p_arr, [1, 2, 3])

    p_arrh = ros_numpy.numpify(p, hom=True)
    np.testing.assert_array_equal(p_arrh, [1, 2, 3, 1])

    self.assertEqual(p, ros_numpy.msgify(Point, p_arr))
    self.assertEqual(p, ros_numpy.msgify(Point, p_arrh))
    self.assertEqual(p, ros_numpy.msgify(Point, p_arrh * 2))
def pose_with_offset(pose, offset):
    # type: (PoseStamped, Tuple[float, float, float]) -> PoseStamped
    offset_pose = PoseStamped()
    offset_pose.header.frame_id = pose.header.frame_id
    offset = qv_mult(rnp.numpify(pose.pose.orientation), offset)
    offset_pose.pose.position = rnp.msgify(
        Point, rnp.numpify(pose.pose.position) + offset)
    offset_pose.pose.orientation = pose.pose.orientation
    return offset_pose
def depth_image_callback(msg):
    global vel
    if vel[5] < 0 < vel[0]:  # turning right
        commands.append([0, 0, 1])
        depth_data.append(preprocess(ros_numpy.numpify(msg)))
    elif vel[5] > 0 and vel[0] > 0:  # turning left
        commands.append([1, 0, 0])
        depth_data.append(preprocess(ros_numpy.numpify(msg)))
    elif round(vel[5], 1) == 0 and vel[0] > 0:  # moving forward
        commands.append([0, 1, 0])
        depth_data.append(preprocess(ros_numpy.numpify(msg)))
    # print(preprocess(ros_numpy.numpify(msg)))
    print(len(depth_data), len(commands))
    print(commands[-1])
def test_vector3(self):
    v = Vector3(1, 2, 3)

    v_arr = ros_numpy.numpify(v)
    np.testing.assert_array_equal(v_arr, [1, 2, 3])

    v_arrh = ros_numpy.numpify(v, hom=True)
    np.testing.assert_array_equal(v_arrh, [1, 2, 3, 0])

    self.assertEqual(v, ros_numpy.msgify(Vector3, v_arr))
    self.assertEqual(v, ros_numpy.msgify(Vector3, v_arrh))

    with self.assertRaises(AssertionError):
        ros_numpy.msgify(Vector3, np.array([0, 0, 0, 1]))
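# The two tests above encode the usual homogeneous-coordinate convention:
# a Point gets w = 1 (translations apply), a Vector3 gets w = 0 (only rotation
# applies). A small sketch of why that matters, with a made-up transform:
import numpy as np
import ros_numpy
from geometry_msgs.msg import Point, Vector3
from tf import transformations

# A transform that translates by (5, 0, 0).
T = transformations.translation_matrix([5, 0, 0])

p = ros_numpy.numpify(Point(1, 2, 3), hom=True)    # [1, 2, 3, 1]
v = ros_numpy.numpify(Vector3(1, 2, 3), hom=True)  # [1, 2, 3, 0]

print(T.dot(p))  # [6, 2, 3, 1] - points are moved by the translation
print(T.dot(v))  # [1, 2, 3, 0] - vectors (directions) are unaffected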
def on_enter(self, userdata):
    gate = gates[userdata.leg_number]
    if not gate.has_seen():
        self._error = True
        return
    point = numpify(gate.position.point)
    normal = numpify(gate.normal.vector)
    target_pose = PoseStamped()
    target_pose.header.frame_id = gate.position.header.frame_id
    target_pose.pose.position = msgify(Point, point + normal * self._offset)
    target_pose.pose.orientation = msgify(Quaternion, normal_to_quaternion(-normal))
    userdata.target_pose = target_pose
def save_calib_file(name, calibration_matrices, p0, tf_buffer, stamp):
    frames = get_camera_frames()
    K1 = calibration_matrices[0]
    K2 = calibration_matrices[1]
    K3 = calibration_matrices[2]
    source = frames[0]  # camera_0 is the source camera
    lidar_frame = "X1/laser/laser"

    transform0 = tf_buffer.lookup_transform_core(source, source, stamp)
    transform1 = tf_buffer.lookup_transform_core(source, frames[1], stamp)
    transform2 = tf_buffer.lookup_transform_core(source, frames[2], stamp)
    transform3 = tf_buffer.lookup_transform_core(source, frames[3], stamp)
    T0 = numpify(transform0.transform)
    T1 = numpify(transform1.transform)
    T2 = numpify(transform2.transform)
    T3 = numpify(transform3.transform)

    K1 = np.array(K1).reshape(3, 3)
    K2 = np.array(K2).reshape(3, 3)
    K3 = np.array(K3).reshape(3, 3)
    # Projection matrices: intrinsics times the 3x4 extrinsic part.
    p1 = np.dot(K1, T1[:3, :])
    p2 = np.dot(K2, T2[:3, :])
    p3 = np.dot(K3, T3[:3, :])

    p0_str = str(p0)[1:-1]
    p1_str = get_matrix_as_string(p1)
    p2_str = get_matrix_as_string(p2)
    p3_str = get_matrix_as_string(p3)

    R0 = T0[:3, :3]
    r0_str = get_matrix_as_string(R0)

    velo_to_cam_transform = tf_buffer.lookup_transform_core(
        lidar_frame, source, rospy.Time(0))
    Tr_velo_to_cam = numpify(velo_to_cam_transform.transform)[:3, :]
    Tr_velo_to_cam_str = get_matrix_as_string(Tr_velo_to_cam)

    with open(name, "w+") as f:
        f.write('P0: ' + p0_str + '\n')
        f.write('P1: ' + p1_str + '\n')
        f.write('P2: ' + p2_str + '\n')
        f.write('P3: ' + p3_str + '\n')
        f.write('R0: ' + r0_str + '\n')
        f.write('Tr_velo_to_cam: ' + Tr_velo_to_cam_str + '\n')
def test_roundtrip_little_endian(self):
    arr = np.random.randint(0, 256, size=(240, 360)).astype('<u2')
    msg = ros_numpy.msgify(Image, arr, encoding='mono16')
    self.assertEqual(msg.is_bigendian, False)
    arr2 = ros_numpy.numpify(msg)
    np.testing.assert_equal(arr, arr2)
def get_position(self, cx, cy, p_matrix, header):
    """
    Given the image location, camera matrix, and frame/stamp, get the 3D position.
    Assumes the point lies on the ground plane.
    """
    try:
        tf = self.tf_buffer.lookup_transform(
            target_frame=header.frame_id.lstrip("/"),
            source_frame="base_link",
            time=header.stamp
        ).transform
    except (tf2_ros.LookupException, tf2_ros.ExtrapolationException) as e:
        rospy.logwarn("TF error %s", e)
        return
    tf = numpify(tf)
    # Fold the base_link->camera transform into the projection matrix, then
    # drop the z column: with z = 0 the 3x3 projection becomes invertible.
    p_prime = p_matrix.dot(tf)
    p_prime = np.hstack((p_prime[:, 0:2], p_prime[:, 3:4]))
    pixel_mat = np.array([cx, cy, 1])
    marker_position = inv(p_prime).dot(pixel_mat)
    marker_position /= marker_position[2]
    if marker_position[0] <= 0:
        return np.array([np.nan, np.nan, np.nan])
    marker_position[2] = 0  # z = 0
    return marker_position
def __pcl_cb(self, data):
    pc = ros_numpy.numpify(data)
    points = np.zeros((pc.shape[0], pc.shape[1], 3))
    points[:, :, 0] = pc['x']
    points[:, :, 1] = pc['y']
    points[:, :, 2] = pc['z']
    self.__points_list = points
def sub_image(self, im_msg):
    """
    Take in an image, and publish a black and white image indicating
    which pixels are cone-like.
    """
    if self.lookup is None:
        return

    old_message_header = deepcopy(im_msg.header)

    # Announce that we have received a packet
    self.pub_packet_entered_node.publish(old_message_header)

    # extract the image
    im = numpify(im_msg)

    # Downsample.
    if self.downsample < 1:
        im = scipy.misc.imresize(im, self.downsample)

    im_mask = self.lookup[self.lookup_idx(im)]

    if self.morph_open:
        open_size = self.morph_size
        open_structure = np.ones((open_size, open_size))
        im_mask = scipy.ndimage.morphology.binary_opening(
            im_mask, iterations=self.morph_iter, structure=open_structure)

    im_mask = (im_mask * 255).astype(np.uint8)

    # build the message
    im_mask_msg = msgify(Image, im_mask, encoding='mono8')
    im_mask_msg.header = old_message_header
    self.pub_mask.publish(im_mask_msg)

    # Announce that a packet has left the node.
    self.pub_packet_left_node.publish(old_message_header)
def sub_image(self, im_msg, image_info): """ Take a thresholded image and output marker position metrics. """ print("sub_image in marker_locator") # Announce that we have received a packet self.pub_packet_entered_node.publish(im_msg.header) # Extract the image im = numpify(im_msg) height, width = im.shape[:2] # Label each connected blob in the image. im2 = (im > 128).astype(bool) labels, num_labels = scipy.ndimage.measurements.label(im2) # no blobs? if num_labels == 0: self.pub_marker.publish(PointStamped( header=Header(frame_id="base_link", stamp=im_msg.header.stamp), point=Point(x=np.nan, y=np.nan, z=0) )) return # Calculate the size of each blob. blob_sizes = scipy.ndimage.measurements.sum(im2, labels=labels, index=xrange(0,num_labels+1)) # Find the biggest blob. biggest_blob = np.argmax(blob_sizes) - 1 pp = scipy.ndimage.measurements.find_objects(labels)[biggest_blob] bbox = BoundingBox(pp[1].start, pp[0].start, pp[1].stop, pp[0].stop) # Calculate centroid (units of pixels). cx, cy = np.mean([bbox.maxx, bbox.minx]), np.mean([bbox.maxy, bbox.miny]) p_matrix = np.array(image_info.P).reshape((3, 4)) marker_position = self.get_position(cx, cy, p_matrix, im_msg.header) if marker_position is None: return self.pub_marker.publish(PointStamped( header=Header(frame_id="base_link", stamp=im_msg.header.stamp), point=msgify(Point, marker_position) )) # Make overlay visualizations. debug_overlay = np.zeros((height, width, 3), dtype=np.uint8) debug_overlay[...,0] = im debug_overlay[...,1] = im debug_overlay[...,2] = im self.draw_bbox(debug_overlay, bbox) self.draw_point(debug_overlay, cx, cy) debug_overlay_msg = msgify(Image, debug_overlay, encoding='rgb8') debug_overlay_msg.header = im_msg.header self.pub_debug_overlay.publish(debug_overlay_msg) # Announce that a packet has left the node. self.pub_packet_left_node.publish(im_msg.header)
def sub_pose(self, pose):
    if self.use_ackermann_rrt2:
        goal = model.pose_from_ros(pose)
    else:
        goal = ros_numpy.numpify(pose.pose.position)[:2]
    if self.goal is None:
        self.goal = goal
    else:
        # update in place, so the rrt gets the new value automatically
        self.goal[...] = goal
    self.goal_changed = True
    rospy.loginfo("Updated goal to {}".format(self.goal))
def sub_marker(self, marker):
    marker_loc = numpify(marker.point, hom=True)
    if np.isnan(marker_loc).any():
        self.marker_target_pose = None
        return
    trans = self.tf_buffer.lookup_transform(
        target_frame="base_link",
        source_frame=self.map_frame,
        time=marker.header.stamp,
        timeout=rospy.Duration(1)
    )
    transform = numpify(trans.transform)
    marker_loc = marker_loc.dot(transform.T)
    marker_target_pose = PoseStamped()
    marker_target_pose.header = marker.header
    marker_target_pose.header.frame_id = 'map'
    marker_target_pose.pose = Pose(
        msgify(Point, marker_loc),
        Quaternion(0, 0, 0, 1)
    )
    self.marker_target_pose = marker_target_pose
def sub_scan(self, scan):
    if self.global_mcl_map is None or self.map_frame is None:
        return
    delay = (scan.header.stamp - rospy.Time.now()).to_sec()
    # rospy.loginfo('scan stamp, now = {}'.format(delay))
    if delay < -.2:
        return

    # Find contiguous runs of "far" readings (> 10 m), padded by a small buffer.
    far = np.asarray(scan.ranges) > 10
    buffer_size = 10
    far_buffered = np.convolve(far, np.ones(buffer_size).astype(bool))[0:len(far)]
    changes = np.where(far_buffered[:-1] != far_buffered[1:])[0]
    if len(changes) == 0:
        return
    group_sizes = np.diff(changes)[::2]
    max_group = np.argmax(group_sizes)
    # Aim at the middle of the largest gap.
    target_index = (changes[2 * max_group] + changes[2 * max_group + 1] - buffer_size) // 2
    rel_angle = scan.angle_min + target_index * scan.angle_increment

    trans = self.tf_buffer.lookup_transform(
        target_frame=self.map_frame,
        source_frame=scan.header.frame_id,
        time=scan.header.stamp,
        timeout=rospy.Duration(1)
    )
    pos = trans.transform.translation
    orient = trans.transform.rotation

    # transform from scan to map
    transform = numpify(trans.transform)
    target_vec = self.TARGET_DIST * np.array([
        np.cos(rel_angle),
        np.sin(rel_angle),
        0,
        1
    ]).dot(transform.T)
    target_angle = model.pose_from_ros(trans).theta + rel_angle

    scan_target_pose = PoseStamped()
    scan_target_pose.header = scan.header
    scan_target_pose.header.frame_id = self.map_frame
    scan_target_pose.pose = Pose(
        Point(*target_vec[0:3]),
        Quaternion(0, 0, np.sin(.5 * target_angle), np.cos(.5 * target_angle))
    )
    self.scan_target_pose = scan_target_pose
    rospy.loginfo("updated target pose")
def sub_image(self, im_msg): """ add pixel color from image """ # the first few frames are bad if self.skip > 0: self.skip -= 1 return # extract the image im = numpify(im_msg) cone_colors = im[self.mask,:] self.cone_color_list.append(cone_colors)
def sub_image(self, im_msg): """ Take in an image, and publish a black and white image indicatng which pixels are cone-like """ old_message_header = deepcopy(im_msg.header) # Announce that we have received a packet self.pub_packet_entered_node.publish(old_message_header) # extract the image im = numpify(im_msg) # Downsample. if self.downsample < 1: im = scipy.misc.imresize(im, self.downsample) if self.mode == 'linear': is_cone = np.ones(im.shape[:-1], dtype=np.bool) im = im.astype(np.float32) # red channel for theta in thetas: is_cone = is_cone & (im.dot(theta[:-1]) + theta[-1] > 0) im_mask = np.uint8(is_cone * 255) elif self.mode == 'lookup': self._init_lookup() im_mask = self.lookup[self.lookup_idx(im)] else: raise ValueError("Unrecognized thresholding mode.") if self.morph_open: open_size = self.morph_size open_structure = np.ones((open_size, open_size)) im_mask = scipy.ndimage.morphology.binary_opening( im_mask, iterations=self.morph_iter, structure=open_structure) im_mask = (im_mask * 255).astype(np.uint8) # build the message im_mask_msg = msgify(Image, im_mask, encoding='mono8') im_mask_msg.header = old_message_header self.pub_mask.publish(im_mask_msg) # Announce that a packet has left the node. self.pub_packet_left_node.publish(old_message_header)
def test_pose(self):
    t = Pose(
        position=Point(1.0, 2.0, 3.0),
        orientation=Quaternion(*transformations.quaternion_from_euler(np.pi, 0, 0))
    )

    t_mat = ros_numpy.numpify(t)
    np.testing.assert_allclose(t_mat.dot([0, 0, 1, 1]), [1.0, 2.0, 2.0, 1.0])

    msg = ros_numpy.msgify(Pose, t_mat)
    np.testing.assert_allclose(msg.position.x, t.position.x)
    np.testing.assert_allclose(msg.position.y, t.position.y)
    np.testing.assert_allclose(msg.position.z, t.position.z)
    np.testing.assert_allclose(msg.orientation.x, t.orientation.x)
    np.testing.assert_allclose(msg.orientation.y, t.orientation.y)
    np.testing.assert_allclose(msg.orientation.z, t.orientation.z)
    np.testing.assert_allclose(msg.orientation.w, t.orientation.w)
def test_transform(self):
    t = Transform(
        translation=Vector3(1, 2, 3),
        rotation=Quaternion(*transformations.quaternion_from_euler(np.pi, 0, 0))
    )

    t_mat = ros_numpy.numpify(t)
    np.testing.assert_allclose(t_mat.dot([0, 0, 1, 1]), [1.0, 2.0, 2.0, 1.0])

    msg = ros_numpy.msgify(Transform, t_mat)
    np.testing.assert_allclose(msg.translation.x, t.translation.x)
    np.testing.assert_allclose(msg.translation.y, t.translation.y)
    np.testing.assert_allclose(msg.translation.z, t.translation.z)
    np.testing.assert_allclose(msg.rotation.x, t.rotation.x)
    np.testing.assert_allclose(msg.rotation.y, t.rotation.y)
    np.testing.assert_allclose(msg.rotation.z, t.rotation.z)
    np.testing.assert_allclose(msg.rotation.w, t.rotation.w)
def sub_scan(self, scan):
    if self.map is None or self.base_frame is None:
        rospy.loginfo('Not processing data until map frame found')
        return

    # get the transformation matrix from base_link to laser
    trans = self.tf_buffer.lookup_transform(self.base_frame,
                                            scan.header.frame_id,
                                            scan.header.stamp)
    trans = ros_numpy.numpify(trans.transform)

    rospy.loginfo('Doing update')
    weights = mcl.ros_sensor_update.sensor_update(self.map, scan,
                                                  self.particles, trans, 10)

    rospy.loginfo('Resampling')
    # we don't actually need the lock when calculating weights, unless our
    # implementation allows the number of particles to increase
    with self.particle_lock:
        self.particles = mcl.particle_filter.resample(
            self.particles, weights, self.num_parts
        )
    self._publish_tf(scan.header.stamp)
def Ros2CV(self):
    # Convert the ROS image to numpy for use with OpenCV
    image = ros_numpy.numpify(self.image)

    # Separate the colour channels so they can be flipped to RGB.
    # Slicing does the same thing as split, but is less costly:
    #   b, g, r = cv2.split(cv_image)
    #   cv_image = cv2.merge((r, g, b))

    # Copy the content of the slices
    r = np.zeros((image.shape[0], image.shape[1]), dtype=image.dtype)
    g = np.zeros((image.shape[0], image.shape[1]), dtype=image.dtype)
    b = np.zeros((image.shape[0], image.shape[1]), dtype=image.dtype)
    r[:, :] = image[:, :, 2]
    g[:, :] = image[:, :, 1]
    b[:, :] = image[:, :, 0]

    # Merge the colours in RGB format
    cv_image = cv2.merge((r, g, b))

    # Return the converted image
    return cv_image
def __init__(self, ros_map=None, map_info=None, frame=None, grid=None):
    # for backwards compatibility, accept an OccupancyGrid
    if ros_map is not None:
        assert isinstance(ros_map, (OccupancyGrid, ros_numpy.numpy_msg(OccupancyGrid)))
        if map_info is not None or frame is not None or grid is not None:
            raise ValueError('If ros_map is specified, it must be the only argument')
        map_info = ros_map.info
        frame = ros_map.header.frame_id
        grid = ros_numpy.numpify(ros_map)
    else:
        assert map_info is not None
        assert frame is not None
        assert grid is not None

    self.info = map_info
    self.frame = frame
    self.grid = grid
    self.resolution = map_info.resolution
    self.pos = map_info.origin.position
    self.orient = map_info.origin.orientation

    # transform from index to world
    self.transform = np.dot(
        transformations.translation_matrix([self.pos.x, self.pos.y, self.pos.z]),
        transformations.quaternion_matrix([self.orient.x, self.orient.y,
                                           self.orient.z, self.orient.w])
    ).dot(
        transformations.scale_matrix(map_info.resolution)
    )
    # Keep only the x, y, and homogeneous columns/rows: grid indices are 2D.
    idxs = [0, 1, 3]
    self.inv_transform = np.linalg.inv(self.transform)[idxs, :]
    self.transform = self.transform[:, idxs]
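# A quick numeric check of the index-to-world transform built above, with
# made-up map metadata (0.05 m resolution, origin at (-1, -2), no rotation);
# none of these numbers come from the original code.
import numpy as np
from tf import transformations

resolution = 0.05
origin_xyz = [-1.0, -2.0, 0.0]
origin_quat = [0.0, 0.0, 0.0, 1.0]  # identity rotation

transform = np.dot(
    transformations.translation_matrix(origin_xyz),
    transformations.quaternion_matrix(origin_quat)
).dot(transformations.scale_matrix(resolution))[:, [0, 1, 3]]

# Grid cell (ix=10, iy=20) as a homogeneous 2D index -> 3D world point.
world = transform.dot([10, 20, 1])
print(world[:3] / world[3])  # [-0.5, -1.0, 0.0]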
def handle_disparity_image(self, image): """ Handles the kinect data, calling the calibration functions, coordinate transformations, and filtering the data using a discrete Kalman filter with a 3D double integrator model. """ np_image = ros_numpy.numpify(image) np_image_rel = np_image if self.cal_frame < self.calibrationLimit: # Calibrates background if not self.cal_frame: print '\n(@ kinectNode) Calibrating background...' self.print_progress(self.cal_frame, self.calibrationLimit-1, "(@ kinectNode) Progress:",'Complete') if self.background is None: self.background = np.zeros(np_image.shape) self.background += np_image / 30.0 self.cal_frame += 1 mask = np.isnan(self.background) self.background[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), self.background[~mask]) else: # Calibrates angle np_image_rel = self.background - np_image if self.angle is None: print '(@ kinectNode) Calibrating angle...' singval = self.calibrate_angle_SVD() print '(@ kinectNode) SVD: %f [deg], singular values %s' % (180/pi * self.angle, str(singval)) best_r = self.calibrate_angle_polyfit() print '(@ kinectNode) Polyfit: %f [deg], residual r = %f' % (180/pi * self.angle, best_r[0]) print '(@ kinectNode) Calibration complete!\n' self.status_pub.publish('True') # Runs in regular mode, publishing the position in the global # coordinate system i, j = np.mean(np.where(np_image_rel>(np.nanmax(np_image_rel) - self.backgroundLimit)), axis=1) if self.plot: if self.ims is None: self.ims = plt.figure(1) self.ims = plt.imshow(np_image_rel, vmin = 0, vmax = 5) plt.colorbar() else: self.ims.set_data(np_image_rel) if self.scatter is not None: self.scatter.remove() self.scatter = plt.scatter([j], [i], color='red') plt.draw() plt.pause(0.01) x, y, z = self.point_from_ij(i, j, np_image) p_raw = Point(x=x, y=y, z=z) self.pos_raw_pub.publish(p_raw) # Kalman filter update - computes, updates and prints the position # and covariance if self.useKalman: zk = np.array([x,y,z]) # Treats the case when a measurement is missed if np.isnan(zk).any() or norm(zk - self.xhat[0:3]) > self.kalmanLimit*norm(np.diag(self.P)[0:3]): zk = None self.xhat, self.P = self.discrete_KF_update(self.xhat, [], zk, self.A, [], self.C, self.P, self.Q, self.R) p = Point(x=self.xhat[0], y=self.xhat[1], z=self.xhat[2]) v = Point(x=self.xhat[3], y=self.xhat[4], z=self.xhat[5]) self.pos_pub.publish(p) self.vel_pub.publish(v)
def test_roundtrip_rgb8(self):
    arr = np.random.randint(0, 256, size=(240, 360, 3)).astype(np.uint8)
    msg = ros_numpy.msgify(Image, arr, encoding='rgb8')
    arr2 = ros_numpy.numpify(msg)
    np.testing.assert_equal(arr, arr2)
def sub_image(self, im_msg, image_info): """ Take a thresholded image and output cone position metrics. """ # Announce that we have received a packet self.pub_packet_entered_node.publish(im_msg.header) # Extract the image im = numpify(im_msg) im2 = (im > 160).astype(bool) height, width = im.shape[:2] # Label each connected blob in the image. labels, num_labels = scipy.ndimage.measurements.label(im2) msg = CameraObjectsStamped(header=im_msg.header) if num_labels == 0: self.pub_obj.publish(msg) # No objects. return # Calculate the size of each blob. blob_sizes = scipy.ndimage.measurements.sum(im2, labels=labels, index=xrange(0,num_labels+1)) # Find the biggest blob. biggest_blob = np.argmax(blob_sizes) - 1 pp = scipy.ndimage.measurements.find_objects(labels)[biggest_blob] bbox = BoundingBox(pp[1].start, pp[0].start, pp[1].stop, pp[0].stop) # Calculate centroid (units of pixels). cx, cy = np.mean([bbox.maxx, bbox.minx]), np.mean([bbox.maxy, bbox.miny]) sx, sy = (bbox.maxx - bbox.minx), (bbox.maxy - bbox.miny) # geometric mean p_matrix = np.array(image_info.P).reshape((3, 4)) uvw = p_matrix.dot([self.WIDTH, self.HEIGHT, 0, 0]) u, v, w = uvw cz = np.min([(u / sx), (v / sy)]) # load the depth image message # d_im = numpify(d_im_msg) * 0.001 # TODO # cz = np.mean(d_im[(labels == biggest_blob) & (d_im != 0)]) # get_depth_for(biggest_blob) # print u / sx, v / sy, sx, sy, uvw #, np.max(d_im) # projection matrix # [px, py, 1] = P.dot([X, Y, Z, 1]) # solve the simultaneous equations # [cx, cy, 1] = P.dot([X, Y, Z, 1]) # cz = Z # => 0 = np.array([0, 0, 1, -cz]).dot([X, Y, Z, 1]) try: position = np.linalg.solve( np.vstack(( p_matrix, [0, 0, 1, -cz] )), np.array([cx, cy, 1, 0]) ) except np.linalg.LinAlgError: rospy.logwarn("Matrix was singular %s", np.vstack(( p_matrix, [0, 0, 1, -cz] ))) return # theses are homogeneous coordinates position /= position[-1] position = Point(*position[:3]) msg.objects = [ CameraObject( label='cone', center=position, size=Vector3( x=remap((bbox.maxx - bbox.minx), 0, width, 0, 1), y=remap((bbox.maxy - bbox.miny), 0, height, 0, 1), z=0 ) ) ] self.pub_obj.publish(msg) # Make overlay visualizations. debug_overlay = np.zeros((height, width, 3), dtype=np.uint8) debug_overlay[...,0] = im debug_overlay[...,1] = im debug_overlay[...,2] = im self.draw_bbox(debug_overlay, bbox) self.draw_point(debug_overlay, cx, cy) debug_overlay_msg = msgify(Image, debug_overlay, encoding='rgb8') debug_overlay_msg.header = im_msg.header self.pub_debug_overlay.publish(debug_overlay_msg) # Announce that a packet has left the node. self.pub_packet_left_node.publish(im_msg.header)
def handle_disparity_image(self, image):
    if self.useDummy:
        np_image = np.reshape(image.data, (480, 640))
    else:
        np_image = ros_numpy.numpify(image)
    np_image_rel = np_image

    if self.cal_frames < 30:
        # Calibrates background
        if not self.cal_frames:
            print('\n(@ kinectNode) Calibrating background')
        cl.print_progress(self.cal_frames, 30 - 1, prefix="Progress:",
                          suffix='Complete', barLength=50)
        if self.background is None:
            self.background = np.zeros(np_image.shape)
        self.background += np_image / 30.0
        self.cal_frames += 1
        mask = np.isnan(self.background)
        self.background[mask] = np.interp(np.flatnonzero(mask),
                                          np.flatnonzero(~mask),
                                          self.background[~mask])
    else:
        # Calibrates angle
        np_image_rel = self.background - np_image
        if self.angle is None:
            print('(@ kinectNode) Calibrating angle')
            self.calibrate_angle_SVD()
            print('Using SVD: %f degrees' % (180 / 3.1415 * self.angle))
            self.calibrate_angle()
            print('Using polyfit: %f degrees\n' % (180 / 3.1415 * self.angle))
            print('(@ kinectNode) Calibration complete!\n')
            self.status_pub.publish('True')

        # Runs in regular mode, publishing the position in the global coordinate system
        i, j = np.mean(np.where(np_image_rel > (np.nanmax(np_image_rel) - 0.1)), axis=1)
        if self.plot:
            if self.ims is None:
                self.ims = plt.imshow(np_image_rel, vmin=0, vmax=5)
                plt.colorbar()
            else:
                self.ims.set_data(np_image_rel)
            if self.scatter is not None:
                self.scatter.remove()
            self.scatter = plt.scatter([j], [i], color='red')
            plt.draw()
            plt.pause(0.01)

        x, y, z = self.point_from_ij(i, j, np_image)
        p_raw = Point(x=x, y=y, z=z)
        self.point_pub_raw.publish(p_raw)

        # Kalman filter update - currently just computes, updates and prints
        # the position and covariance
        if self.useKalman:
            zk = np.array([x, y, z])
            # Treats the case when a measurement is missed
            if np.isnan(zk).any() or norm(zk - self.xhat[0:3]) > \
                    self.kalmanLimit * norm(np.diag(self.P)[0:3]):
                # print('measurement error: %.3f' % norm(zk - self.xhat[0:3]))
                # print('co-variance norm: %.3f' % norm(np.diag(self.P)[0:3]))
                # print("Throw away")
                zk = None
            self.xhat, self.P = cl.discrete_KF_update(
                self.xhat, [], zk, self.A, [], self.C, self.P, self.Q, self.R)
            x, y, z = self.xhat[0], self.xhat[1], self.xhat[2]
            p = Point(x=x, y=y, z=z)
            self.point_pub.publish(p)
import os

import numpy as np
import PIL.Image
import rosbag
import ros_numpy
from sensor_msgs.msg import Image
from images2gif import writeGif  # assumed source of writeGif

fname = os.path.expanduser("~/team-ws/bags/2016-02-26-20-16-28-coneshadow.bag")

frames = []
durations = []
last_t = None
with rosbag.Bag(fname) as bag:
    for i, (topic, msg, t) in enumerate(
            bag.read_messages(topics=['/camera/zed/rgb/image_rect_color'])):
        if i < 5:
            continue
        print(i)
        if not last_t:
            dt = t - t
        else:
            dt = t - last_t
        msg.__class__ = Image
        frame = ros_numpy.numpify(msg)[..., ::-1]  # BGR -> RGB
        frame = PIL.Image.fromarray(frame)
        frame.thumbnail((320, 180), PIL.Image.ANTIALIAS)
        frame = np.array(frame)
        frames.append(frame)
        durations.append(dt.to_sec())
        last_t = t

writeGif('sample-data-shadow.gif', frames, durations)
def timer_callback(self, event):
    if self.map is None:
        rospy.logwarn("No map")
        return
    if self.goal is None:
        rospy.logwarn("No goal")
        return
    try:
        tf = self.tf_buffer.lookup_transform(
            target_frame=self.map.frame,
            source_frame=self.base_frame,
            time=event.current_real,
            timeout=rospy.Duration(0.1)
        ).transform
    except (tf2_ros.LookupException, tf2_ros.ExtrapolationException):
        rospy.logwarn("TF error getting robot pose", exc_info=True)
        return

    # skip times when we are already off-map
    curr = self.map[self.map.index_at(ros_numpy.numpify(tf.translation))]
    if curr > 0 or curr is np.ma.masked:
        rospy.logwarn("Current node is not valid")
        return

    # don't do anything if we requested single-shot planning
    if self.replan == 'on_goal_change' and not self.goal_changed:
        return
    self.goal_changed = False

    # determine our pose
    if self.use_ackermann_rrt or self.use_ackermann_rrt2:
        pose = model.pose_from_ros(tf)
    else:
        pose = ros_numpy.numpify(tf.translation)[:2]

    # either reuse or throw out the RRT
    if not self.rrt:
        self.start_over(pose)
    else:
        if self.use_ackermann_rrt2:
            pose_lookup = pose
        elif self.use_ackermann_rrt:
            pose_lookup = pose.xy
        else:
            pose_lookup = pose

        # decide whether we can reroot the rrt
        nearest, dist, _ = self.rrt.get_nearest_node(pose_lookup, exchanged=True)
        if dist > self.reroute_threshold:
            rospy.loginfo("Nearest node is {}m away, ignoring".format(dist))
            self.start_over(pose)
        else:
            rospy.loginfo("Nearest node is {}m away, rerooting and pruning".format(dist))
            nearest.make_root()
            self.rrt.prune()
            self.pub_vis_tree.publish(rrtutils.delete_marker_array_msg)

    if self.do_profile:
        # run and profile the rrt
        pr = cProfile.Profile()
        pr.enable()
    path = list(self.rrt.run())
    if self.do_profile:
        pr.disable()
        pr.dump_stats(os.path.join(
            rospkg.RosPack().get_path('lab6'), 'rrt_stats.txt'
        ))

    # process the results
    self.send_debug_tree(path)
    self.publish_path(path)
    rospy.loginfo('Planned!')