def remove_white_pixel(msg, points_, vis=False):
    points_with_c_ = pointclouds.pointcloud2_to_array(msg)
    points_with_c_ = pointclouds.split_rgb_field(points_with_c_)
    r = np.asarray(points_with_c_['r'], dtype=np.uint32)
    g = np.asarray(points_with_c_['g'], dtype=np.uint32)
    b = np.asarray(points_with_c_['b'], dtype=np.uint32)
    rgb_colors = np.vstack([r, g, b]).T
    # rgb = rgb_colors.astype(np.float) / 255
    # keep only points whose r, g and b values are all below 210, i.e. drop near-white pixels
    ind_good_points_ = np.sum(rgb_colors < 210, axis=-1) == 3
    ind_good_points_ = np.where(ind_good_points_)[0]
    new_points_ = points_[ind_good_points_]
    if vis:
        p = points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(1, 0, 0))
        p = new_points_
        mlab.points3d(p[:, 0], p[:, 1], p[:, 2], scale_factor=0.002, color=(0, 0, 1))
        mlab.points3d(0, 0, 0, scale_factor=0.01, color=(0, 1, 0))  # plot the origin
        mlab.show()
    return new_points_
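# A minimal usage sketch for remove_white_pixel; the topic name below is an
# assumption, not part of the original code:
#
#   msg = rospy.wait_for_message('/camera/depth_registered/points', PointCloud2)
#   xyz = pointclouds.get_xyz_points(pointclouds.pointcloud2_to_array(msg))
#   no_white = remove_white_pixel(msg, xyz, vis=False)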
def compute_centroid(self, cloud):
    arr = pointclouds.pointcloud2_to_array(cloud, 0)
    arr_xyz = pointclouds.get_xyz_points(arr)
    min_x = sys.float_info.max
    min_y = sys.float_info.max
    min_z = sys.float_info.max
    max_x = -sys.float_info.max
    max_y = -sys.float_info.max
    max_z = -sys.float_info.max
    for j in range(len(arr_xyz)):
        if arr_xyz[j][0] > max_x:
            max_x = arr_xyz[j][0]
        if arr_xyz[j][1] > max_y:
            max_y = arr_xyz[j][1]
        if arr_xyz[j][2] > max_z:
            max_z = arr_xyz[j][2]
        if arr_xyz[j][0] < min_x:
            min_x = arr_xyz[j][0]
        if arr_xyz[j][1] < min_y:
            min_y = arr_xyz[j][1]
        if arr_xyz[j][2] < min_z:
            min_z = arr_xyz[j][2]
    c_x = (max_x + min_x) / 2.
    c_y = (max_y + min_y) / 2.
    c_z = (max_z + min_z) / 2.
    # print "c_x: ", c_x, " ", "c_y: ", c_y, "c_z: ", c_z
    # print "min, max", min_x, " ", max_x, " ", min_y, " ", max_y, " ", min_z, " ", max_z
    return [c_x, c_y, c_z]
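# The per-point loop above can be collapsed into a few NumPy reductions. A
# minimal vectorized sketch of the same bounding-box-center computation
# (compute_centroid_vectorized is a hypothetical name; arr_xyz is assumed to
# be the Nx3 float array that get_xyz_points returns):
def compute_centroid_vectorized(arr_xyz):
    mins = arr_xyz.min(axis=0)  # [min_x, min_y, min_z]
    maxs = arr_xyz.max(axis=0)  # [max_x, max_y, max_z]
    return ((mins + maxs) / 2.).tolist()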
def callback(self, data): rospy.loginfo("Objects %d", data.objects.__len__()) table_corners = [] # obtain table_offset and table_pose to = rospy.wait_for_message(self.table_topic, MarkerArray); # obtain Table corners ... rospy.loginfo("Tables hull size %d", to.markers.__len__()) if not to.markers: rospy.loginfo("No tables detected") return else: # NB - D says that ORK has this set to filter based on height. # IIRC, it's 0.6-0.8m above whatever frame is set as the floor point_array_size = 4 # for the 4 corners of the bounding box for i in range (0, point_array_size): p = Point() p.x = to.markers[0].points[i].x p.y = to.markers[0].points[i].y p.z = to.markers[0].points[i].z table_corners.append(p) # this is a table pose at the edge close to the robot, in the center of x axis table_pose = PoseStamped() table_pose.header = to.markers[0].header table_pose.pose = to.markers[0].pose # Determine table dimensions rospy.loginfo('calculating table pose bounding box in frame: %s' % table_pose.header.frame_id) min_x = sys.float_info.max min_y = sys.float_info.max max_x = -sys.float_info.max max_y = -sys.float_info.max for i in range (table_corners.__len__()): if table_corners[i].x > max_x: max_x = table_corners[i].x if table_corners[i].y > max_y: max_y = table_corners[i].y if table_corners[i].x < min_x: min_x = table_corners[i].x if table_corners[i].y < min_y: min_y = table_corners[i].y table_dim = Point() # TODO: if we don't (can't!) compute the height, should we at least give it non-zero depth? # (would also require shifting the observed centroid down by table_dim.z/2) table_dim.z = 0.0 table_dim.x = abs(max_x - min_x) table_dim.y = abs(max_y - min_y) print "Dimensions: ", table_dim.x, table_dim.y # Temporary frame used for transformations table_link = 'table_link' # centroid of a table in table_link frame centroid = PoseStamped() centroid.header.frame_id = table_link centroid.header.stamp = table_pose.header.stamp centroid.pose.position.x = (max_x + min_x)/2. centroid.pose.position.y = (max_y + min_y)/2. 
centroid.pose.position.z = 0.0 centroid.pose.orientation.x = 0.0 centroid.pose.orientation.y = 0.0 centroid.pose.orientation.z = 0.0 centroid.pose.orientation.w = 1.0 # generate transform from table_pose to our newly-defined table_link tt = TransformStamped() tt.header = table_pose.header tt.child_frame_id = table_link tt.transform.translation = table_pose.pose.position tt.transform.rotation = table_pose.pose.orientation self.tl.setTransform(tt) self.tl.waitForTransform(table_link, table_pose.header.frame_id, table_pose.header.stamp, rospy.Duration(3.0)) if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp): centroid_table_pose = self.tl.transformPose(table_pose.header.frame_id, centroid) self.pose_pub.publish(centroid_table_pose) else: rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link) return # transform each object into desired frame; add to list of clusters cluster_list = [] for i in range (data.objects.__len__()): rospy.loginfo("Point clouds %s", data.objects[i].point_clouds.__len__()) pc = PointCloud2() pc = data.objects[i].point_clouds[0] arr = pointclouds.pointcloud2_to_array(pc, 1) arr_xyz = pointclouds.get_xyz_points(arr) arr_xyz_trans = [] for j in range (arr_xyz.__len__()): ps = PointStamped(); ps.header.frame_id = table_link ps.header.stamp = table_pose.header.stamp ps.point.x = arr_xyz[j][0] ps.point.y = arr_xyz[j][1] ps.point.z = arr_xyz[j][2] if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp): ps_in_kinect_frame = self.tl.transformPoint(table_pose.header.frame_id, ps) else: rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link) return arr_xyz_trans.append([ps_in_kinect_frame.point.x, ps_in_kinect_frame.point.y, ps_in_kinect_frame.point.z]) pc_trans = PointCloud2() pc_trans = pointclouds.xyz_array_to_pointcloud2(np.asarray([arr_xyz_trans]), table_pose.header.stamp, table_pose.header.frame_id) self.pub.publish(pc_trans) cluster_list.append(pc_trans) rospy.loginfo("cluster size %d", cluster_list.__len__()) # finally - save all data in the object that'll be sent in response to actionserver requests with self.result_lock: self._result.objects = cluster_list self._result.table_dims = table_dim self._result.table_pose = centroid_table_pose self.has_data = True
def find_the_green_dot():
    def l2_norm(p1, p2):
        # squared euclidean distance (no sqrt, so compare against squared thresholds)
        return (p1['x'] - p2['x']) * (p1['x'] - p2['x']) + \
               (p1['y'] - p2['y']) * (p1['y'] - p2['y']) + \
               (p1['z'] - p2['z']) * (p1['z'] - p2['z'])

    print("Finding the green dot...")
    print("\tGrabbing pointcloud...")
    data = cutil.ros_topic_get("camera/depth_registered/points", PointCloud2)
    cloud = pointclouds.pointcloud2_to_array(data, split_rgb=True)
    max_dist = 0.015 * 0.015  # squared distance threshold: points within 1.5 cm are connected
    colored_cloud = cloud[(cloud['b'] < 128) & (cloud['r'] < 120) & (cloud['g'] > 128)]
    print("\tGot " + str(len(colored_cloud)) + " green points. Constructing the graph...")

    # now let's construct a graph
    g = nx.Graph()
    for i1, p1 in enumerate(colored_cloud):
        for i2, p2 in enumerate(colored_cloud):
            if i2 <= i1:
                continue
            g.add_node(i1)
            g.add_node(i2)
            d = l2_norm(p1, p2)
            if d <= max_dist:
                g.add_edge(i1, i2)

    print("\tfinding cliques...")
    cliques = list(nx.find_cliques(g))
    print("\tfound cliques = " + str(len(cliques)))

    # now keep only nodes in the largest cliques
    max_clique = 0
    for c in cliques:
        if len(c) > max_clique:
            max_clique = len(c)
    popular_nodes = []
    num_max_cliques = 0
    for c in cliques:
        if len(c) < max_clique:
            continue
        num_max_cliques += 1
        for i in c:
            if i not in popular_nodes:
                popular_nodes.append(i)
    for node in g.nodes():
        if node not in popular_nodes:
            g.remove_node(node)
    print("\tThe largest clique was " + str(max_clique) + ", and there were " +
          str(num_max_cliques) + " cliques of that size.")

    print("\tConstructing marker...")
    # now let's average the surviving points to place the marker we found
    avgx = 0.0
    avgy = 0.0
    avgz = 0.0
    for node in g.nodes_iter():
        avgx += colored_cloud[node]['x']
        avgy += colored_cloud[node]['y']
        avgz += colored_cloud[node]['z']
    print("\tnumber of nodes = " + str(g.number_of_nodes()))
    avgx /= g.number_of_nodes()
    avgy /= g.number_of_nodes()
    avgz /= g.number_of_nodes()
    print("Found marker at (" + str(avgx) + ", " + str(avgy) + ", " + str(avgz) + ")")
    return Point(x=avgx, y=avgy, z=avgz), data.header.frame_id
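# The all-pairs loop above is O(n^2) in the number of green points. A sketch
# of an equivalent neighbor-graph construction with a kd-tree; scipy is an
# assumption here (it is not imported by the original code):
def build_neighbor_graph(colored_cloud, radius=0.015):
    from scipy.spatial import cKDTree
    xyz = np.column_stack([colored_cloud['x'], colored_cloud['y'], colored_cloud['z']])
    g = nx.Graph()
    g.add_nodes_from(range(len(xyz)))
    tree = cKDTree(xyz)
    for i, j in tree.query_pairs(radius):  # all index pairs closer than `radius`
        g.add_edge(i, j)
    return g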
def callback(self, data): rospy.loginfo("Recognized objects %d", data.objects.__len__()) table_corners = [] # obtain table_offset and table_pose to = rospy.wait_for_message(self.table_topic, MarkerArray) # obtain Table corners ... rospy.loginfo("Tables hull size %d", to.markers.__len__()) if not to.markers: rospy.loginfo("No tables detected") return else: # NB - D says that ORK has this set to filter based on height. # IIRC, it's 0.6-0.8m above whatever frame is set as the floor point_array_size = 4 # for the 4 corners of the bounding box for i in range (0, point_array_size): p = Point() p.x = to.markers[0].points[i].x p.y = to.markers[0].points[i].y p.z = to.markers[0].points[i].z table_corners.append(p) # this is a table pose at the edge close to the robot, in the center of x axis table_pose = PoseStamped() table_pose.header = to.markers[0].header table_pose.pose = to.markers[0].pose # Determine table dimensions rospy.loginfo('calculating table pose bounding box in frame: %s' % table_pose.header.frame_id) min_x = sys.float_info.max min_y = sys.float_info.max max_x = -sys.float_info.max max_y = -sys.float_info.max for i in range (table_corners.__len__()): if table_corners[i].x > max_x: max_x = table_corners[i].x if table_corners[i].y > max_y: max_y = table_corners[i].y if table_corners[i].x < min_x: min_x = table_corners[i].x if table_corners[i].y < min_y: min_y = table_corners[i].y table_dim = Point() # TODO: if we don't (can't!) compute the height, should we at least give it non-zero depth? # (would also require shifting the observed centroid down by table_dim.z/2) table_dim.z = 0.0 table_dim.x = abs(max_x - min_x) table_dim.y = abs(max_y - min_y) print "Dimensions: ", table_dim.x, table_dim.y # Temporary frame used for transformations table_link = 'table_link' # centroid of a table in table_link frame centroid = PoseStamped() centroid.header.frame_id = table_link centroid.header.stamp = table_pose.header.stamp centroid.pose.position.x = (max_x + min_x)/2. centroid.pose.position.y = (max_y + min_y)/2. 
centroid.pose.position.z = 0.0 centroid.pose.orientation.x = 0.0 centroid.pose.orientation.y = 0.0 centroid.pose.orientation.z = 0.0 centroid.pose.orientation.w = 1.0 # generate transform from table_pose to our newly-defined table_link tt = TransformStamped() tt.header = table_pose.header tt.child_frame_id = table_link tt.transform.translation = table_pose.pose.position tt.transform.rotation = table_pose.pose.orientation self.tl.setTransform(tt) self.tl.waitForTransform(table_link, table_pose.header.frame_id, table_pose.header.stamp, rospy.Duration(3.0)) if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp): centroid_table_pose = self.tl.transformPose(table_pose.header.frame_id, centroid) self.pose_pub.publish(centroid_table_pose) else: rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link) return # transform each object into desired frame; add to list of clusters cluster_list = [] object_list = [] #if only clusters on the table should be extracted if self.extract_clusters: cluster_list = self.extract_clusters_f() #else try to recognize objects if self.recognize_objects: for i in range (data.objects.__len__()): # rospy.loginfo("Point clouds %s", data.objects[i].point_clouds.__len__()) pc = PointCloud2() pc = data.objects[i].point_clouds[0] arr = pointclouds.pointcloud2_to_array(pc, 1) arr_xyz = pointclouds.get_xyz_points(arr) arr_xyz_trans = [] for j in range (arr_xyz.__len__()): ps = PointStamped(); ps.header.frame_id = table_link ps.header.stamp = table_pose.header.stamp ps.point.x = arr_xyz[j][0] ps.point.y = arr_xyz[j][1] ps.point.z = arr_xyz[j][2] if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp): ps_in_kinect_frame = self.tl.transformPoint(table_pose.header.frame_id, ps) else: rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link) return arr_xyz_trans.append([ps_in_kinect_frame.point.x, ps_in_kinect_frame.point.y, ps_in_kinect_frame.point.z]) pc_trans = PointCloud2() pc_trans = pointclouds.xyz_array_to_pointcloud2(np.asarray([arr_xyz_trans]), table_pose.header.stamp, table_pose.header.frame_id) #self.pub.publish(pc_trans) object_list.append(pc_trans) rospy.loginfo("object size %d", object_list.__len__()) cluster_centroids = [] object_centroids = [] for cloud in range (cluster_list.__len__()): cluster_centroids.append(self.compute_centroid(cluster_list[cloud])) for cloud in range (object_list.__len__()): object_centroids.append(self.compute_centroid(object_list[cloud])) recognized_objects = [] indices = [] for centroid in range (cluster_centroids.__len__()): # dist = self.closest(np_cluster_centroids, np.asarray(cluster_centroids[centroid])) if object_centroids: indices = self.do_kdtree(np.asarray(object_centroids), np.asarray(cluster_centroids[centroid]), self.search_radius) if not indices: recognized_objects.append(0) else: recognized_objects.append(1) print "recognized objects: ", recognized_objects # finally - save all data in the object that'll be sent in response to actionserver requests with self.result_lock: self._result.objects = cluster_list self._result.recognized_objects = recognized_objects self._result.table_dims = table_dim self._result.table_pose = centroid_table_pose self.has_data = True
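# do_kdtree is not shown in this file; a minimal sketch of what its call site
# implies, using scipy's cKDTree (the signature and the scipy dependency are
# assumptions, not the original method):
#
#   def do_kdtree(self, reference_points, query_point, radius):
#       from scipy.spatial import cKDTree
#       # indices of reference points that lie within `radius` of the query point
#       return cKDTree(reference_points).query_ball_point(query_point, radius)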
def kinectcallback(self, img_msg, points_msg):
    # self.spinner()
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')
    pil = img.fromstring('RGB', (img_msg.width, img_msg.height), img_msg.data, 'raw')
    pil = pil.convert("L")
    width, height = pil.size
    raw = pil.tostring()
    image = zbar.Image(width, height, "Y800", raw)
    scanner.scan(image)

    # Transform point cloud to the RGB frame
    # Doesn't work, transformPointCloud doesn't support PC2
    # points_msg = self.tfl.transformPointCloud("rgbd_cam_1_rgb_frame", points_msg)

    # NUMPY POINT CLOUD ZOOM ZOOM!
    # strip the rgb field so we keep a plain xyz structured array
    temp_arr = pointclouds.pointcloud2_to_array(points_msg)
    new_dtype = []
    for field_name in temp_arr.dtype.names:
        field_type, field_offset = temp_arr.dtype.fields[field_name]
        if not field_name == 'rgb':
            new_dtype.append((field_name, field_type))
    cloud_arr = np.zeros(temp_arr.shape, new_dtype)
    cloud_arr['x'] = temp_arr['x']
    cloud_arr['y'] = temp_arr['y']
    cloud_arr['z'] = temp_arr['z']

    avg_yaw = 0.0
    avg_pitch = 0.0
    avg_xyz = (0.0, 0.0, 0.0)
    avg_count = 0
    codes = QRCodes()
    for symbol in image:
        # print "decoded", symbol.type, " time '%s.%s'" % (str(img_msg.header.stamp.secs), str(img_msg.header.stamp.nsecs)), " at ", time.time()
        # Print Time Stamps
        # print "i:", img_msg.header.stamp.secs, img_msg.header.stamp.nsecs
        # print "p:", points_msg.header.stamp.secs, points_msg.header.stamp.nsecs

        # Don't try to decode anything that is not recognized as a QRCode
        if not str(symbol.type) == "QRCODE":
            continue
        locs = list()
        for x in symbol.location:
            locs.append((x[0] * 640 / width, x[1] * 480 / height))
        # print "Detected '%s' @ %s.%s" % (symbol.data, str(img_msg.header.stamp.secs), str(img_msg.header.stamp.nsecs))

        # Translate from 2D pixel coordinates to 3D points
        found = 0
        output = list()
        output_arr = np.zeros(cloud_arr.shape, cloud_arr.dtype)
        for pt in locs:
            # check the pixel at (x, y) and its neighbors to find one of them that's non-nan
            pts3d = read_offset_and_alternatives(cloud_arr, pt[0], pt[1], points_msg.width, points_msg.height)
            # print pt, "->", pts3d
            if pts3d is None:
                # one of our 4 points was nan, as were its 4 adjacent pixels
                break
            # received a valid point at (x, y) (or one of its neighbors)
            output.append(np.array((pts3d['x'], pts3d['y'], pts3d['z'])))
            output_arr[pt[0], pt[1]] = pts3d

        # Output debug point cloud
        if len(output) > 0:
            output_cloud = pointclouds.array_to_pointcloud2(output_arr, stamp=rospy.Time.now(), frame_id=points_msg.header.frame_id)
            # output_cloud = create_cloud_xyz(Header(frame_id=points_msg.header.frame_id), output)
            points_pub.publish(output_cloud)

        # Decode a pose if we have 3D info for the whole thing
        if len(output) == 4:
            # these poses are all non-nan. bow chicka wow wow
            # Find the plane
            # TODO: Right now I do this with a cross product, using 3 of the 4 points.
            # It would be better to do some kind of least squares optimization.

            # Calculate the orientation quaternion of the vector normal to the plane
            norm = np.cross(output[1] - output[0], output[3] - output[0])
            norm = normalize(norm)
            yaw = math.atan2(norm[1], norm[0])
            pitch = -math.atan2(norm[2], math.sqrt(norm[0] * norm[0] + norm[1] * norm[1]))
            quat = quaternion_from_euler(0, pitch, yaw)

            # Determine the center of the plane
            midline_pts = closest_points(output[0], output[2], output[1], output[3])
            center = midpoint(midline_pts[0], midline_pts[1])

            # Get the "Front" of the QR Code, between the top two points
            front = midpoint(output[0], output[3])
            forward = normalize(front - center)
            marker = BuildMarker("red", center[0], center[1], center[2], quat, "norm", points_msg.header.frame_id)
            marker1_pub.publish(marker)

            # Determine quaternion orientation for the forward vector
            # NOTE: The forward vector and the vector normal to the QR code are not necessarily square!
            yaw = math.atan2(forward[1], forward[0])
            pitch = -math.atan2(forward[2], math.sqrt(forward[0] * forward[0] + forward[1] * forward[1]))
            quat = quaternion_from_euler(0, pitch, yaw)
            marker = BuildMarker("blue", center[0], center[1], center[2], quat, "norm", points_msg.header.frame_id)
            marker2_pub.publish(marker)

            # Publish QRCode and Pose
            pose = Pose()
            pose.position.x = center[0]
            pose.position.y = center[1]
            pose.position.z = center[2]
            pose.orientation.x = quat[0]
            pose.orientation.y = quat[1]
            pose.orientation.z = quat[2]
            pose.orientation.w = quat[3]

            code = QRCode()
            a, b, c, d = locs[0], locs[1], locs[2], locs[3]
            code.location.append(Point(x=a[0], y=a[1]))
            code.location.append(Point(x=b[0], y=b[1]))
            code.location.append(Point(x=c[0], y=c[1]))
            code.location.append(Point(x=d[0], y=d[1]))
            code.data = symbol.data
            # code.image = img_msg
            code.pose = pose
            codes.codes.append(code)

            # Publish the pose by itself
            msg = PoseStamped()
            msg.header.frame_id = points_msg.header.frame_id
            msg.pose = pose
            pose_pub.publish(msg)

    if len(codes.codes) > 0:
        qrcode_pub.publish(codes)
    del image
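# normalize, midpoint and closest_points are defined elsewhere; minimal
# sketches of what the call sites above imply for the first two (these are
# assumptions, not the original helpers). closest_points presumably returns
# the nearest points between the two diagonals of the code's corner quad.
#
#   def normalize(v):
#       return v / np.linalg.norm(v)   # scale a vector to unit length
#
#   def midpoint(p1, p2):
#       return (p1 + p2) / 2.0         # halfway between two numpy points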