def offset_calc(data):
    """GPS fix callback: publish the x/y offset (metres) from the initial fix.

    data: a GPS message carrying num_satellites, latitude and longitude.
    Publishes a geometry_msgs Point on 'x_y_offseter' where x is the
    east/west haversine distance and y the north/south distance from
    (INITIAL_LONGITUDE, INITIAL_LATITUDE); returns None on a weak fix.
    """
    # NOTE(review): a new Publisher is created on every callback and no
    # queue_size is given — consider hoisting this to module scope.
    pub = rospy.Publisher('x_y_offseter', Point)
    msg = Point()
    # Fewer than 3 satellites: fix is unreliable, skip this sample.
    if data.num_satellites < 3:
        return None
    global INITIAL_LATITUDE
    global INITIAL_LONGITUDE
    rospy.loginfo('INIT_LONG: ' + str(INITIAL_LONGITUDE) + ' INIT_LAT: ' + str(INITIAL_LATITUDE))
    rospy.loginfo('data_long: ' + str(data.longitude) + ' data_lat: ' + str(data.latitude))
    if INITIAL_LONGITUDE == data.longitude and INITIAL_LATITUDE == data.latitude:
        # Still at the reference point: zero offset.
        msg.x = 0
        msg.y = 0
    else:
        # Distance along one axis at a time: vary only latitude for y,
        # only longitude for x (the other coordinate is held at initial).
        msg.y = haversine_distance(INITIAL_LONGITUDE, INITIAL_LATITUDE, INITIAL_LONGITUDE, data.latitude)
        msg.x = haversine_distance(INITIAL_LONGITUDE, INITIAL_LATITUDE, data.longitude , INITIAL_LATITUDE)
        # haversine_distance is a magnitude; restore the sign from the
        # direction of travel relative to the initial fix.
        if data.longitude < INITIAL_LONGITUDE:
            msg.x = -msg.x
        if data.latitude < INITIAL_LATITUDE:
            msg.y = -msg.y
    rospy.loginfo('Publishing to topic gps_data:\n' + 'x: ' + str(msg.x) + '\ny: ' + str(msg.y) + '\n\n')
    pub.publish(msg)
def publishCells(grid):
    """Publish the occupied cells of a flat occupancy grid as GridCells.

    grid: flat list of occupancy values where 100 means "wall".
    Publishes on the module-level `wallpub`. Cell size is hard-coded to
    0.3 m and the x/y offsets are tuned for the simple map.
    """
    global wallpub
    print "publishing"
    k = 0  # running flat index into grid
    cells = GridCells()
    cells.header.frame_id = "map"
    cells.cell_width = 0.3  # edit for grid size .3 for simple map
    cells.cell_height = 0.3  # edit for grid size
    # NOTE(review): `height`/`width` are module-level globals; the loops
    # start at 1, skipping row/column 0 — confirm that is intentional.
    for i in range(1, height):
        for j in range(1, width):
            if grid[k] == 100:
                point = Point()
                point.x = j * 0.3 + 0.32  # edit for grid size
                point.y = i * 0.3 - 0.15  # edit for grid size
                point.z = 0
                cells.cells.append(point)
            k = k + 1
            k = k + 1
            # NOTE(review): k is advanced twice between the two probes, so
            # this checks every other cell with a second x offset — the
            # bookkeeping looks suspect and should be verified against the
            # map layout before reuse.
            if grid[k] == 100:
                point = Point()
                point.x = j * 0.3 + 0.62  # edit for grid size
                point.y = i * 0.3 - 0.15  # edit for grid size
                point.z = 0
                cells.cells.append(point)
    # print cells # used for debugging
    wallpub.publish(cells)
def construct(self, int_marker):
    """Attach an arrow control (node1 -> node2) to int_marker and return it.

    The arrow spans the two graph nodes this object connects and is added
    as an always-visible, non-interactive control.

    int_marker: an InteractiveMarker to extend (mutated and returned).
    """
    # Removed unused locals x1..z2 — the endpoints read self.node1/node2
    # directly below.
    # NOTE(review): Marker colors are floats in [0, 1]; (80, 80, 80)
    # saturates — confirm the intended grey was e.g. (0.8, 0.8, 0.8).
    color = (80, 80, 80)
    a_marker = Marker()
    a_marker.type = Marker.ARROW
    a_marker.scale.x = .05  # shaft diameter
    a_marker.scale.y = .1   # head diameter
    a_marker.scale.z = .1   # head length
    (a_marker.color.r, a_marker.color.g, a_marker.color.b) = color
    a_marker.color.a = .5   # semi-transparent
    start = Point()
    end = Point()
    start.x = self.node1.x
    start.y = self.node1.y
    start.z = self.node1.z
    end.x = self.node2.x
    end.y = self.node2.y
    end.z = self.node2.z
    a_marker.points.append(start)
    a_marker.points.append(end)
    a_control = InteractiveMarkerControl()
    a_control.always_visible = True
    a_control.markers.append(a_marker)
    int_marker.controls.append(a_control)
    return(int_marker)
def pub_arrow(self, pos1, pos2, color, mag_force):
    """Publish a single ARROW marker from pos1 to pos2.

    pos1, pos2: 3x1 matrix-like positions (indexed [row, 0]).
    color: (r, g, b, a) floats. mag_force: stored in the marker's text field.
    """
    arrow = Marker()
    arrow.header.frame_id = self.frame
    arrow.header.stamp = rospy.Time.now()
    arrow.ns = "basic_shapes"
    arrow.id = 99999
    arrow.type = Marker.ARROW
    arrow.action = Marker.ADD
    # Endpoints: tail at pos1, tip at pos2.
    for pos in (pos1, pos2):
        endpoint = Point()
        endpoint.x = pos[0, 0]
        endpoint.y = pos[1, 0]
        endpoint.z = pos[2, 0]
        arrow.points.append(endpoint)
    arrow.scale.x = 0.05  # shaft diameter
    arrow.scale.y = 0.1   # head diameter
    arrow.color.r = color[0]
    arrow.color.g = color[1]
    arrow.color.b = color[2]
    arrow.color.a = color[3]
    arrow.lifetime = rospy.Duration()
    arrow.text = mag_force
    self.pub.publish(arrow)
def callback_Versor(data):
    """Update the shared arrow marker to visualize a unit vector from data.

    data: message whose .data[0:3] holds the vector components.
    Mutates the marker at marker_array.markers[markers_list[id_]] in place
    (module-level globals); nothing is published here.
    """
    #print data
    m = marker_array.markers[markers_list[id_]]
    m.header.stamp = rospy.Time.now()
    m.action = 0  # ADD/MODIFY
    m.type = m.ARROW
    p0 = Point()  # arrow tail (origin)
    p1 = Point()  # arrow tip
    x = 0
    y = 0
    z = 0
    s_x = data.data[0]
    s_y = data.data[1]
    s_z = data.data[2]
    # Tip = origin + 2*vector, scaled down by 0.2 for display.
    p1.x = (x + 2 * s_x) * 0.2
    p1.y = (y + 2 * s_y) * 0.2
    p1.z = (z + 2 * s_z) * 0.2
    p0.x = x
    p0.y = y
    p0.z = z
    # Replace any previous endpoints rather than accumulating.
    m.points = []
    m.points.append(p0)
    m.points.append(p1)
    m.scale.x = 0.02  # shaft diameter
    m.scale.y = 0.05  # head diameter
    m.scale.z = 0.08  # head length
    m.color.r = 1
    m.color.g = 1
    m.color.b = 0
    m.color.a = 0.6
    m.lifetime = rospy.Duration(0)  # never auto-expire
    m.frame_locked = False
def draw_line(self, quad, x, y, z, hash_val):
    """Queue a blue LINE_STRIP marker from the quad's position to (x, y, z).

    hash_val becomes the marker id. Returns self so calls can be chained;
    does nothing once rospy is shutting down.
    """
    if rospy.is_shutdown():
        return self
    line = Marker()
    line.header.frame_id = "/my_frame"
    line.lifetime = rospy.Duration(self.duration)
    line.type = line.LINE_STRIP
    line.action = line.ADD
    line.scale.x = 4  # line width
    line.color.b = 1.0
    line.color.a = 1.0
    start = Point()
    start.x = quad.get_x()
    start.y = quad.get_y()
    start.z = quad.get_z()
    finish = Point()
    finish.x = x
    finish.y = y
    finish.z = z
    line.points.append(start)
    line.points.append(finish)
    line.pose.orientation.w = 1.0  # identity orientation
    line.id = hash_val
    self.markers.append(line)
    return self
def linkMap():
    """Wire up the 4-connected adjacency graph for every grid cell.

    For each flat index i, probes the cells north, east, south and west of
    it and registers every in-map neighbour as an adjacency of graph node
    G[i]. Relies on module-level height, width, G and the point/index
    helpers (getX, getY, pointAbove/Right/Below/Left, isInMap,
    getIndexFromPoint).
    """
    for i in range(0, height * width):
        currentPoint = Point()
        # Same probe order as the previous unrolled code: N, E, S, W.
        # currentPoint is re-seeded before every probe, mirroring the
        # original's resets (defensive, in case a helper mutates its arg).
        for neighborOf in (pointAbove, pointRight, pointBelow, pointLeft):
            currentPoint.x = getX(i)
            currentPoint.y = getY(i)
            if(isInMap(neighborOf(currentPoint))):
                myPoint = neighborOf(currentPoint)
                G[i].addAdjacent(getIndexFromPoint(myPoint.x, myPoint.y))
def main():
    """Node entry point: publish tracked body-part positions at 100 Hz.

    Initializes the node, creates a TransformListener (stored in the
    module-level `tfl`), and, whenever fill_points() succeeds, publishes
    the five module-level position triples (left/right arm origin and
    point, head origin) on 'all_positions'.
    """
    global tfl
    rospy.init_node('coordinate_publisher')
    tfl = tf.TransformListener()
    array_publisher = rospy.Publisher('all_positions', PointArray, queue_size=1)
    rate = rospy.Rate(100)
    while not rospy.is_shutdown():
        # fill_points refreshes the module-level *_point/*_origin globals;
        # only publish when it reports success.
        if(fill_points(tfl)):
            lo = Point()  # left arm origin
            ro = Point()  # right arm origin
            lp = Point()  # left arm point
            rp = Point()  # right arm point
            ho = Point()  # head origin
            rp.x = right_arm_point[0]
            rp.y = right_arm_point[1]
            rp.z = right_arm_point[2]
            lp.x = left_arm_point[0]
            lp.y = left_arm_point[1]
            lp.z = left_arm_point[2]
            ro.x = right_arm_origin[0]
            ro.y = right_arm_origin[1]
            ro.z = right_arm_origin[2]
            lo.x = left_arm_origin[0]
            lo.y = left_arm_origin[1]
            lo.z = left_arm_origin[2]
            ho.x = head_origin[0]
            ho.y = head_origin[1]
            ho.z = head_origin[2]
            # NOTE(review): publishing a bare tuple relies on rospy mapping
            # it onto the PointArray fields positionally — confirm against
            # the PointArray message definition.
            array_publisher.publish((lo, ro, lp, rp, ho))
        rate.sleep()
def makeMarker(self, pos, vec, idNum=1, color=(1,0,0)):
    """Build (without publishing) an ARROW marker from pos along vec.

    pos: arrow tail (x, y, z). vec: displacement to the tip.
    idNum: marker id. color: (r, g, b). Returns the Marker.
    """
    arrow = Marker()
    arrow.id = idNum
    arrow.ns = "test"
    arrow.header.frame_id = "/camera_rgb_optical_frame"
    arrow.type = Marker.ARROW
    arrow.action = Marker.ADD
    tail = Point()
    tail.x = pos[0]
    tail.y = pos[1]
    tail.z = pos[2]
    tip = Point()
    tip.x = tail.x + vec[0]
    tip.y = tail.y + vec[1]
    tip.z = tail.z + vec[2]
    arrow.points = [tail, tip]
    arrow.scale.x = 0.1
    arrow.scale.y = 0.1
    arrow.scale.z = 0.1
    # Fully opaque, caller-supplied RGB.
    arrow.color.a = 1
    arrow.color.r = color[0]
    arrow.color.g = color[1]
    arrow.color.b = color[2]
    return arrow
def addPolygon(self, points, linewidth=0.02):
    """Append a closed red LINE_STRIP polygon to the marker array.

    points: sequence of (x, y, z) triples; the strip is closed by
    repeating the first vertex at the end. linewidth: scale.x of the strip.
    """
    self.oid = self.oid + 1
    self.name = "/polygon" + str(self.oid)
    poly = Marker()
    poly.id = self.oid
    poly.ns = "/polygon"
    poly.header.frame_id = self.name
    poly.type = poly.LINE_STRIP
    poly.action = poly.ADD
    poly.scale.x = linewidth  # LINE_STRIP uses only scale.x (line width)
    poly.scale.y = 1
    poly.scale.z = 1
    # Opaque red.
    poly.color.r = 1.0
    poly.color.g = 0.0
    poly.color.b = 0.0
    poly.color.a = 1.0
    # Identity pose at the origin.
    poly.pose.orientation.w = 1.0
    poly.pose.position.x = 0
    poly.pose.position.y = 0
    poly.pose.position.z = 0
    poly.points = []
    # Every vertex, then the first one again to close the loop.
    for coords in list(points) + [points[0]]:
        vertex = Point()
        vertex.x = coords[0]
        vertex.y = coords[1]
        vertex.z = coords[2]
        poly.points.append(vertex)
    self.marker = poly  # keep the last-built marker reachable, as before
    self.markerArray.markers.append(poly)
def publishChecked(grid):
    """Publish the free (value 0) cells of a flat grid as GridCells.

    grid: flat list of occupancy values; 0 means "checked/free".
    Publishes on the module-level `ckd`. Mirrors publishCells but with
    hard-coded 10x9 loop bounds instead of the grid dimensions.
    """
    global ckd
    print "publishing"
    k=0  # running flat index into grid
    cells = GridCells()
    cells.header.frame_id = 'map'
    cells.cell_width = 0.3  # edit for grid size .3 for simple map
    cells.cell_height = 0.3  # edit for grid size
    # NOTE(review): bounds are hard-coded (1..9 / 1..8) rather than using
    # the real grid height/width — confirm before using on other maps.
    for i in range(1,10):
        for j in range(1,9):
            if (grid[k] == 0):
                point=Point()
                point.x=j*.3+.32  # edit for grid size
                point.y=i*.3-.15  # edit for grid size
                point.z=0
                cells.cells.append(point)
            k=k+1
            k=k+1
            # NOTE(review): k advances twice between the two probes, so the
            # second test reads every other cell at a shifted x — the
            # bookkeeping looks suspect, same as in publishCells.
            if (grid[k] == 0):
                point=Point()
                point.x=j*.3+.62  # edit for grid size
                point.y=i*.3-.15  # edit for grid size
                point.z=0
                cells.cells.append(point)
    #print cells # used for debugging
    ckd.publish(cells)
def smoothPath(path):
    """Smooth a parsed path by averaging adjacent waypoints.

    path: list of flat grid indices. Returns a list of Points where each
    entry is the midpoint of the current node and either its successor
    (for the first output) or the previously smoothed point.
    NOTE(review): the final node of `path` is never represented in the
    output (the loop only emits while i+1 < len(path)) — confirm intended.
    """
    #takes the parsed path & tries to remove unecessary zigzags
    #TODO
    returnPath = list()
    for i,node in enumerate(path):
        averagePoint = Point()
        if(i+1 < len(path)):
            nextNode = path[i+1]
            nextNodeX = getWorldPointFromIndex(nextNode).x
            nextNodeY = getWorldPointFromIndex(nextNode).y
            currNode = path[i]
            currNodeX = getWorldPointFromIndex(currNode).x
            currNodeY = getWorldPointFromIndex(currNode).y
            if( not returnPath):
                # First output: midpoint of the first two nodes.
                averagePoint.x = (currNodeX+nextNodeX)/2
                averagePoint.y = (currNodeY+nextNodeY)/2
                averagePoint.z = 0#math.atan2(currNodeY-nextNodeY, currNodeX-nextNodeX)
            else:
                # Subsequent outputs: midpoint of the previous smoothed
                # point and the current node (returnPath[i-1] is the last
                # appended entry at this point in the loop).
                averagePoint.x = (returnPath[i-1].x+currNodeX)/2
                averagePoint.y = (returnPath[i-1].y+currNodeY)/2
                averagePoint.z = 0 #math.atan2(currNodeY-returnPath[i-1].y, currNodeX-returnPath[i-1].x)
            returnPath.append(averagePoint)
            #print "Average Point in Path: X: %f Y: %f" % (averagePoint.x, averagePoint.y)
    return returnPath
def get_effective_point(data):
    """Split an OccupancyGrid into free-space and obstacle point lists.

    data: nav_msgs OccupancyGrid. Returns [clear_area, block_area], where
    clear_area holds a Point (map-frame metres) for every cell whose value
    is 0 and block_area one for every cell whose value is 100.

    BUG FIX: the previous version created a single `clear` and a single
    `block` Point before the loops and appended the SAME object each time,
    so every list entry aliased one Point holding only the last cell's
    coordinates (the fresh Points were created after the loops, too late).
    A new Point is now built per matching cell.
    """
    map_matrix = map_matrix_ranger(data)
    #print map_matrix
    clear_area, block_area = [], []
    width = data.info.width
    height = data.info.height
    resolution = data.info.resolution
    map_origin = data.info.origin
    centremap_cell = map_center_cell(map_origin, resolution)
    #print centremap_cell
    for y in range(height):
        for x in range(width):
            if map_matrix[y][x] == 0:
                clear = Point()  # fresh instance per cell (see BUG FIX above)
                clear.x = (x - centremap_cell[0]) * resolution
                clear.y = (y - centremap_cell[1]) * resolution + resolution
                clear_area.append(clear)
            if map_matrix[y][x] == 100:
                block = Point()  # fresh instance per cell (see BUG FIX above)
                block.x = (x - centremap_cell[0]) * resolution
                block.y = (y - centremap_cell[1]) * resolution + resolution
                block_area.append(block)
    #print 'block_area', len(block_area)
    return [clear_area, block_area]
def get_angle(self, link, spin_center, steer_angle, stamp):
    """Compute a wheel link's angle and radius about the spin center.

    link: tf frame of the wheel. spin_center: PointStamped of the turning
    center. steer_angle: current steer command (only its sign is used).
    stamp: time for the tf lookup and marker. Returns (angle, radius).
    Also publishes an arc marker tracing the wheel's predicted path.
    """
    # lookup the position of each link in the back axle frame
    ts = self.tf_buffer.lookup_transform(spin_center.header.frame_id, link, stamp, rospy.Duration(4.0))
    dy = ts.transform.translation.y - spin_center.point.y
    dx = ts.transform.translation.x - spin_center.point.x
    angle = math.atan2(dx, abs(dy))
    # Mirror the angle when steering the other way.
    if steer_angle < 0:
        angle = -angle
    # visualize the trajectory forward or back of the current wheel
    # given the spin center
    radius = math.sqrt(dx * dx + dy * dy)
    self.marker.points = []
    # Sweep +/-1 rad around the wheel's current arc position in 0.05 steps.
    for pt_angle in numpy.arange(abs(angle) - 1.0, abs(angle) + 1.0 + 0.025, 0.05):
        pt = Point()
        pt.x = spin_center.point.x + radius * math.sin(pt_angle)
        # The y direction of the arc flips with the steering sign.
        if steer_angle < 0:
            pt.y = spin_center.point.y - radius * math.cos(pt_angle)
        else:
            pt.y = spin_center.point.y + radius * math.cos(pt_angle)
        # ns/stamp are re-assigned every iteration; harmless but only the
        # last assignment matters.
        self.marker.ns = link
        self.marker.header.stamp = stamp
        self.marker.points.append(pt)
    self.marker_pub.publish(self.marker)
    return angle, radius
def getWaypoints(path):
    """Convert a path of flat grid indices into world-frame Point waypoints.

    path: non-empty list of grid indices. Returns a list of Points.
    Current behavior (preserved): the first node is emitted twice (once by
    the seed below, once by the i == 0 loop pass); every intermediate node
    is emitted because BOTH branches of the turn test append; the final
    node is never emitted.
    """
    returnPath = list()
    point = Point()
    pointNode = path[0]
    point.x = getWorldPointFromIndex(pointNode).x
    point.y = getWorldPointFromIndex(pointNode).y
    point.z = 0
    returnPath.append(point)
    for i, node in enumerate(path):
        currPoint = Point()
        currNode = path[i]
        currPoint.x = getWorldPointFromIndex(currNode).x
        currPoint.y = getWorldPointFromIndex(currNode).y
        currPoint.z = 0
        if (i + 1 < len(path)):
            nextPoint = Point()
            nextNode = path[i + 1]
            nextPoint.x = getWorldPointFromIndex(nextNode).x
            nextPoint.y = getWorldPointFromIndex(nextNode).y
            nextPoint.z = 0
            # BUG FIX: the dx term was (nextPoint.x - nextPoint.y) — a typo
            # for (nextPoint.x - currPoint.x). Harmless today because both
            # branches append, but correct for when the >=10 degree filter
            # is actually enabled.
            heading = math.degrees(math.fabs(math.atan2(
                nextPoint.y - currPoint.y, nextPoint.x - currPoint.x)))
            if heading >= 10:
                returnPath.append(currPoint)
            else:
                # Intended to skip near-straight segments; currently still
                # appends so the output is unchanged.
                returnPath.append(currPoint)
    return returnPath
def frontier(fullmapGridExplored):
    """Pick the nearest frontier cell and hand it to the local navigator.

    fullmapGridExplored: 2-D grid of explored costs (-1 = unknown).
    Scans for known cells bordering unknown ones (frontiers), then drives
    toward the frontier closest to the module-level currentPoint.
    NOTE(review): this is clearly work-in-progress — frontier cells are
    never actually stored (the `pass` below), `arrayOfFrontires` must be
    populated elsewhere, the i bound is compared against `columns` (and j
    against `rows`) which looks swapped, and the last two neighbour tests
    check truthiness instead of == -1. Verify before relying on it.
    """
    global currentPoint
    smallestCostCell = Point()
    # Sentinel "far away" cell/distance.
    smallestCostCell.x = 10000
    smallestCostCell.y = 10000
    smallestCostCell.z=0
    smallestDistance =10000
    for i in range(0,rows):
        for j in range(0,columns):
            amount =0  # count of unknown (-1) neighbours
            # Only known, non-wall cells (0 < value < 9000) can be frontiers.
            if(fullmapGridExplored[i][j] > 0 and fullmapGridExplored[i][j]<9000):
                if(not i==0):
                    if(fullmapGridExplored[i-1][j]==-1):
                        amount= amount+1
                if(not j==0):
                    if(fullmapGridExplored[i][j-1]==-1):
                        amount= amount+1
                # NOTE(review): bound should likely be rows-1, and the test
                # should likely be == -1 as above.
                if(not i==columns):
                    if(fullmapGridExplored[i+1][j]):
                        amount= amount+1
                if(not j==rows):
                    if(fullmapGridExplored[i][j+1]):
                        amount= amount+1
                if(amount>0):
                    pass
                    #put cells in arrayOfFrontires
    # Choose the frontier nearest to the robot by heuristic distance.
    for element in arrayOfFrontires:#create array of frontiers at some point
        if(heuristic(currentPoint,element)<smallestDistance):
            smallestCostCell.x = element.x
            smallestCostCell.y = element.y
            smallestDistance = heuristic(currentPoint,element)
    localNav(smallestCostCell,currentPoint) #publish instead
def publishMap(self):
    """Publish all uneaten map pellets as two SPHERE_LIST markers.

    Marker id 0: regular pellets (small spheres at z=0.15).
    Marker id 1: power-up pellets (large spheres at z=0.2).
    Reads self.mapPoints (dicts with "x", "y", "eaten", "powerup") and
    publishes a MarkerArray on self.pubMap.
    """
    markerArray = MarkerArray()
    # The two marker configurations only differ in id, sphere size, z
    # height and which pellet kind they collect — build them with one
    # helper instead of two copy-pasted blocks.
    markerArray.markers.append(self._makePelletMarker(0, 0.05, 0.15, False))
    markerArray.markers.append(self._makePelletMarker(1, 0.3, 0.2, True))
    self.pubMap.publish(markerArray)

def _makePelletMarker(self, marker_id, scale, z, powerup):
    """Build one yellow SPHERE_LIST marker of all uneaten pellets.

    marker_id: Marker.id. scale: sphere diameter. z: display height.
    powerup: which pellet kind (p["powerup"]) to include.
    """
    marker = Marker()
    marker.header.frame_id = "/map"
    marker.type = marker.SPHERE_LIST
    marker.action = marker.ADD
    marker.scale.x = scale
    marker.scale.y = scale
    marker.scale.z = scale
    # Opaque yellow.
    marker.color.a = 1.0
    marker.color.r = 1.0
    marker.color.g = 1.0
    marker.color.b = 0.0
    # Identity pose; points carry the positions.
    marker.pose.orientation.w = 1.0
    marker.pose.position.x = 0.0
    marker.pose.position.y = 0.0
    marker.pose.position.z = 0.0
    marker.id = marker_id
    for p in self.mapPoints:
        if p["eaten"]:
            continue
        if p["powerup"] != powerup:
            continue
        point = Point()
        point.x = p["x"]
        point.y = p["y"]
        point.z = z
        marker.points.append(point)
    return marker
def setupStart(pos):
    """Record the start pose from an initial-pose message.

    pos: PoseWithCovarianceStamped-like message. Snaps x/y onto an even
    multiple of the module-level `resolution` and stores the yaw in the
    module-level `startTheta`.
    """
    global startTheta

    def snap(coord):
        # Truncate to a whole number of cells, then round away float noise.
        return round(int(coord / resolution) * resolution, 2)

    position = pos.pose.pose.position
    start = Point()
    start.x = snap(position.x)
    start.y = snap(position.y)
    q = pos.pose.pose.orientation
    _roll, _pitch, yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])
    startTheta = yaw
def pubViz(self, ast, bst):
    """Publish two users' joint points as markers, frame by frame.

    ast, bst: start frame offsets into self.pdata for user 1 and user 2.
    Steps through self.winSize frames at 10 Hz, publishing a MarkerArray
    with one POINTS marker (type 8) per user each tick.
    """
    rate = rospy.Rate(10)
    for i in range(self.winSize):
        msgs = MarkerArray()
        # --- user 1 ---
        msg = Marker()
        # marker properties
        msg.header.frame_id = 'camera_link'
        msg.header.stamp = rospy.Time.now()
        msg.ns = 'j1'
        msg.action = 0  # ADD/MODIFY
        msg.id = 1
        msg.type = 8  # POINTS
        msg.scale.x = 0.1
        msg.scale.y = 0.1
        msg.color = self.carray[2]
        # fill in the joint points for this frame
        for j1 in range(self.jointSize):
            point = Point()
            point.x = self.pdata[0][ast+i][j1][0]
            point.y = self.pdata[0][ast+i][j1][1]
            point.z = self.pdata[0][ast+i][j1][2]
            msg.points.append(point)
        msg.pose.orientation.w = 1.0
        msgs.markers.append(msg)
        # --- user 2 (same layout, second data stream) ---
        msg = Marker()
        msg.header.frame_id = 'camera_link'
        msg.header.stamp = rospy.Time.now()
        msg.ns = 'j2'
        msg.action = 0
        msg.id = 2
        msg.type = 8
        msg.scale.x = 0.1
        msg.scale.y = 0.1
        msg.color = self.carray[1]
        for j2 in range(self.jointSize):
            point = Point()
            point.x = self.pdata[1][bst+i][j2][0]
            point.y = self.pdata[1][bst+i][j2][1]
            point.z = self.pdata[1][bst+i][j2][2]
            msg.points.append(point)
        msg.pose.orientation.w = 1.0
        msgs.markers.append(msg)
        self.mpub.publish(msgs)
        rate.sleep()
def translatePoints(gridMap, start, goal):
    """Convert world-frame start/goal points into grid-cell coordinates.

    gridMap: OccupancyGrid providing the origin. start, goal: Points in
    world metres. Cells are 0.1 m (hence the factor of 10). Returns the
    (translatedStart, translatedGoal) pair with integer x/y.
    """
    origin = gridMap.info.origin.position

    def world_to_cell(value, origin_coord):
        # Offset by the map origin, then round to the nearest 0.1 m cell.
        return int(round((value - origin_coord) * 10))

    translatedStart = Point(start.x, start.y, 0)
    translatedGoal = Point(goal.x, goal.y, 0)
    translatedStart.x = world_to_cell(translatedStart.x, origin.x)
    translatedStart.y = world_to_cell(translatedStart.y, origin.y)
    translatedGoal.x = world_to_cell(translatedGoal.x, origin.x)
    translatedGoal.y = world_to_cell(translatedGoal.y, origin.y)
    return translatedStart, translatedGoal
def GoTorsoCB(self, data):
    """Walk the NAO toward the calibrated torso destination.

    Reads the robot's current odometry pose, expresses both the robot and
    the destination relative to their calibration offsets, and issues a
    relative walkTo with the summed displacement.
    """
    robot_position = self.motionProxy.getRobotPosition(False)
    naoPos = Point()
    naoPos.x = robot_position[0]
    naoPos.y = robot_position[1]
    naoPos.z = robot_position[2]
    # Displacement = (robot drift since calibration) +
    #                (destination offset from its calibration).
    target = Point()
    target.x = (naoPos.x - self.naoCalib.x) + (self.torsoDestination.x - self.torsoCalib.x)
    target.y = (naoPos.y - self.naoCalib.y) + (self.torsoDestination.y - self.torsoCalib.y)
    target.z = (naoPos.z - self.naoCalib.z) + (self.torsoDestination.z - self.torsoCalib.z)
    self.motionProxy.walkTo(target.x, target.y, target.z)
def pubRviz(self, pos, joints):
    """Publish trajectory lines and joint point clouds to RViz.

    pos: list of trajectories, each a list of (x, y, z) triples — drawn as
    LINE_STRIP markers (type 4), one per trajectory.
    joints: list of joint sets, each a list of (x, y, z) triples — drawn
    as POINTS markers (type 8), one per set.
    """
    msgs = MarkerArray()
    for p in range(len(pos)):
        msg = Marker()
        msg.header.frame_id = 'camera_link'
        msg.header.stamp = rospy.Time.now()
        msg.ns = 'marker'
        msg.action = 0  # ADD/MODIFY
        msg.id = p
        msg.type = 4  # LINE_STRIP
        msg.scale.x = 0.1
        msg.scale.y = 0.1
        msg.color = self.carray[2]
        for i in range(len(pos[p])):
            point = Point()
            point.x = pos[p][i][0]
            point.y = pos[p][i][1]
            point.z = pos[p][i][2]
            msg.points.append(point)
        msg.pose.orientation.w = 1.0
        msgs.markers.append(msg)
    for j in range(len(joints)):
        msg = Marker()
        msg.header.frame_id = 'camera_link'
        msg.header.stamp = rospy.Time.now()
        msg.ns = 'joints'
        msg.action = 0
        msg.id = j
        msg.type = 8  # POINTS
        msg.scale.x = 0.1
        msg.scale.y = 0.1
        # NOTE(review): colour is indexed by the joint-set number — this
        # raises IndexError if len(joints) > len(self.carray); confirm.
        msg.color = self.carray[j]
        #print "joints len:"+str(len(joints[j]))
        for i in range(len(joints[j])):
            point = Point()
            point.x = joints[j][i][0]
            point.y = joints[j][i][1]
            point.z = joints[j][i][2]
            msg.points.append(point)
        msg.pose.orientation.w = 1.0
        msgs.markers.append(msg)
    self.mpub.publish(msgs)
def publishObstacles(self):
    """Publish every obstacle remembered in the obstacle map as one red POINTS marker."""
    mk = Marker()
    mk.header.stamp = rospy.get_rostime()
    mk.header.frame_id = '/base_link'
    mk.ns = 'basic_shapes'
    mk.id = 0
    mk.type = Marker.POINTS
    # 0.3 m points, opaque red.
    mk.scale.x = 0.3
    mk.scale.y = 0.3
    mk.scale.z = 0.3
    mk.color.r = 1.0
    mk.color.a = 1.0
    for obstacle in self.obstacle_map.obstacles_in_memory:
        p = Point()
        p.x = obstacle[0]
        p.y = obstacle[1]
        mk.points.append(p)
    self.obs_pub.publish(mk)
def getWorldPointFromIndex(index):
    """Convert a flat grid index into the world-frame centre of that cell.

    Uses the module-level resolution, offsetX/offsetY and getX/getY
    helpers; the half-resolution term moves from the cell corner to its
    centre. Returns a Point with z = 0.
    """
    half_cell = 0.5 * resolution
    point = Point()
    point.x = getX(index) * resolution + offsetX + half_cell
    point.y = getY(index) * resolution + offsetY + half_cell
    point.z = 0
    return point
def clear_octomap(self, center, width, height):
    """Clear a box of the octomap centred on `center`.

    center: (x, y) of the box centre. width/height: box extent in metres
    along x and y. The z range is fixed at +/-5 m so the whole column is
    cleared; narrow it to clear only certain parts of the map (only
    obstacles, only lines, etc).

    BUG FIX: the log line previously formatted self.start_pos into the
    "around (..)" slot even though the cleared box is built around
    `center` — it now reports the actual centre. Also renamed the locals
    that shadowed the builtins min/max.
    """
    rospy.loginfo("Clearing octomap. The box is {}m (x) by {}m (y) around ({}, {})".format(
        width, height, center[0], center[1]))
    min_pt = Point()
    max_pt = Point()
    min_pt.x = center[0] - width/2.0
    max_pt.x = center[0] + width/2.0
    min_pt.y = center[1] - height/2.0
    max_pt.y = center[1] + height/2.0
    # tweak Z to clear only certain parts of the map (only obstacles, only lines, etc)
    min_pt.z = -5.0
    max_pt.z = 5.0
    self.clear_octomap_service(min_pt, max_pt)
def cb_points3d(self, p):
    """Append each detected 3-D point to its id's track history.

    p: message with parallel arrays ids, x, y, z; one Point per id is
    appended to self.tracks3d[id].
    """
    for i, track_id in enumerate(p.ids):
        mp = Point()
        mp.x = p.x[i]
        mp.y = p.y[i]
        mp.z = p.z[i]
        self.tracks3d[track_id].append(mp)
def reset_octomap(self):
    """Reset the map by clearing a large fixed box (200 m x 100 m x 10 m).

    Fix: the locals were named `min`/`max`, shadowing the Python builtins
    for the rest of the function body — renamed to min_pt/max_pt.
    Behavior is unchanged.
    """
    rospy.loginfo("Resetting by clearing octomap.")
    min_pt = Point()
    max_pt = Point()
    min_pt.x = -100
    max_pt.x = 100
    min_pt.y = -50
    max_pt.y = 50
    # tweak Z to clear only certain parts of the map (only obstacles, only lines, etc)
    min_pt.z = -5.0
    max_pt.z = 5.0
    self.clear_octomap_service(min_pt, max_pt)
def prepare_and_publish_geometry_msg(self):
    """Tutorial: fill and publish the common geometry_msgs message types.

    Publishes a Point, a PointStamped and a Twist with fixed demo values.
    The Pose2D, PoseStamped and Transform messages are instantiated but —
    per the inline notes — intentionally left unfilled/unpublished as
    exercises for the reader.
    """
    ''' Fill and publish point message '''
    point_msg = Point()
    point_msg.x = 1.0;
    point_msg.y = 1.0;
    point_msg.z = 1.0;
    self.point_msg_pub.publish(point_msg)
    ''' Fill and publish point message '''
    point_stamped_msg = PointStamped()
    now = rospy.get_rostime()
    #rospy.loginfo("Current time %i %i", now.secs, now.nsecs)
    point_stamped_msg.header.stamp = now;
    point_stamped_msg.header.frame_id = "frame_dummy";
    point_stamped_msg.point.x = 1.0;
    point_stamped_msg.point.y = 1.0;
    point_stamped_msg.point.z = 1.0;
    self.pointstamped_msg_pub.publish(point_stamped_msg)
    ''' Fill and publish twist message '''
    twist_msg = Twist()
    # Pure forward motion: 1 m/s in x, no rotation.
    twist_msg.linear.x = 1.0;
    twist_msg.linear.y = 0.0;
    twist_msg.linear.z = 0.0;
    twist_msg.angular.x = 0.0;
    twist_msg.angular.y = 0.0;
    twist_msg.angular.z = 0.0;
    self.twist_msg_pub.publish(twist_msg)
    ''' Fill and publish pose 2D message Please, do it your self for practice '''
    pose_2d_msg = Pose2D()
    ''' Fill and publish pose stamped message Please, do it your self for practice '''
    pose_stamped_msg = PoseStamped()
    ''' Fill and publish transform message Please, do it your self for practice '''
    transform_msg = Transform()
def __make_mesh(self, name, pose, filename):
    """Build a CollisionObject from the first mesh in a model file.

    name: object id. pose: PoseStamped placing the mesh. filename: path
    loadable by pyassimp. Raises MoveItCommanderException when the file
    contains no meshes. Returns the populated CollisionObject.
    """
    co = CollisionObject()
    scene = pyassimp.load(filename)
    if not scene.meshes:
        raise MoveItCommanderException("There are no meshes in the file")
    co.operation = CollisionObject.ADD
    co.id = name
    co.header = pose.header
    mesh = Mesh()
    # Only the first mesh of the scene is converted.
    for face in scene.meshes[0].faces:
        triangle = MeshTriangle()
        # NOTE(review): `face.indices` matches older pyassimp; newer
        # versions expose faces as plain index lists — confirm the pinned
        # pyassimp version.
        if len(face.indices) == 3:
            triangle.vertex_indices = [face.indices[0], face.indices[1], face.indices[2]]
        # Appended unconditionally: non-triangular faces contribute a
        # default (all-zero) triangle rather than being skipped.
        mesh.triangles.append(triangle)
    for vertex in scene.meshes[0].vertices:
        point = Point()
        point.x = vertex[0]
        point.y = vertex[1]
        point.z = vertex[2]
        mesh.vertices.append(point)
    co.meshes = [mesh]
    co.mesh_poses = [pose.pose]
    # Free pyassimp's native resources before returning.
    pyassimp.release(scene)
    return co
def drawlandmark(pub):
    """Publish the six landmark tag positions as red POINTS markers.

    pub: a Publisher for visualization_msgs/Marker. Positions come from
    the module-level tagnum_x/tagnum_y arrays (centimetres, hence /100).
    The module-level pt_count is incremented and used as the marker id.

    BUG FIX: Marker.lifetime is a duration; it was assigned rospy.Time(0)
    (a point in time). Now rospy.Duration(0), i.e. never auto-expire.
    """
    global pt_count
    points_tag = Marker()
    points_tag.header.frame_id = "/base_link"
    points_tag.action = Marker.ADD
    points_tag.header.stamp = rospy.Time.now()
    points_tag.lifetime = rospy.Duration(0)  # was rospy.Time(0): wrong type
    points_tag.scale.x = 0.1;
    points_tag.scale.y = 0.1;
    points_tag.scale.z = 0.1;
    # Opaque red.
    points_tag.color.a = 1.0;
    points_tag.color.r = 1.0;
    points_tag.color.g = 0.0;
    points_tag.color.b = 0.0;
    points_tag.ns = "pts_line"
    pt_count += 1
    points_tag.id = pt_count
    points_tag.type = Marker.POINTS
    for x in range(6):
        p1 = Point()
        # NOTE(review): if tagnum_x/tagnum_y hold ints under Python 2,
        # /100 truncates — confirm the arrays hold floats (cm -> m).
        p1.x = tagnum_x[x]/100
        p1.y = tagnum_y[x]/100
        p1.z = 0
        points_tag.points.append(p1)
    pub.publish(points_tag)
theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w]) rospy.init_node("speed_controller") # Topic subscribed to, Topic type, callback definition name sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom) pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1) speed = Twist() r = rospy.Rate(4) goal = Point() goal.x = 5 goal.y = 5 while not rospy.is_shutdown(): inc_x = goal.x - x inc_y = goal.y - y angle_to_goal = atan2(inc_y, inc_x) if abs(angle_to_goal - theta) > 0.1: speed.linear.x = 0.0 speed.angular.z = 0.3 else: speed.linear.x = 0.5 speed.angular.z = 0.0 pub.publish(speed)
def CompileArray(self, x_index, y_index, h):
    """Pack a grid cell (x_index, y_index) and height h into a Point message."""
    cell = Point()
    cell.x, cell.y, cell.z = x_index, y_index, h
    return cell
def node():
    """Frontier filter node: cluster detected frontier points and republish.

    Subscribes to the map, each robot's global costmap and the detected
    frontier points; in the main loop it clusters frontiers with MeanShift,
    discards centroids that fall on costmap obstacles or carry too little
    information gain, and publishes the survivors on 'filtered_points'
    (plus RViz markers on 'frontiers'/'centroids').
    """
    global frontiers, mapData, global1, global2, global3, globalmaps, litraIndx, n_robots, namespace_init_count
    rospy.init_node('filter', anonymous=False)
    # fetching all parameters
    map_topic = rospy.get_param('~map_topic', '/map')
    threshold = rospy.get_param('~costmap_clearing_threshold', 70)
    # this can be smaller than the laser scanner range, >> smaller >> less
    # computation time >> too small is not good, info gain won't be accurate
    info_radius = rospy.get_param('~info_radius', 1.0)
    goals_topic = rospy.get_param('~goals_topic', '/detected_points')
    n_robots = rospy.get_param('~n_robots', 1)
    namespace = rospy.get_param('~namespace', '')
    namespace_init_count = rospy.get_param('namespace_init_count', 1)
    rateHz = rospy.get_param('~rate', 100)
    litraIndx = len(namespace)
    rate = rospy.Rate(rateHz)
    #-------------------------------------------
    rospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)
    #---------------------------------------------------------------------------------------------------------------
    # One costmap slot per robot, filled by the globalMap callback.
    for i in range(0, n_robots):
        globalmaps.append(OccupancyGrid())
    if len(namespace) > 0:
        for i in range(0, n_robots):
            rospy.Subscriber(
                namespace + str(i + namespace_init_count) +
                '/move_base_node/global_costmap/costmap',
                OccupancyGrid, globalMap)
    elif len(namespace) == 0:
        rospy.Subscriber('/move_base_node/global_costmap/costmap',
                         OccupancyGrid, globalMap)
    # wait if map is not received yet
    # NOTE(review): busy-wait loops — these spin a CPU core until data arrives.
    while (len(mapData.data) < 1):
        pass
    # wait if any of robots' global costmap map is not received yet
    for i in range(0, n_robots):
        while (len(globalmaps[i].data) < 1):
            pass
    global_frame = "/" + mapData.header.frame_id
    tfLisn = tf.TransformListener()
    if len(namespace) > 0:
        for i in range(0, n_robots):
            tfLisn.waitForTransform(
                global_frame[1:],
                namespace + str(i + namespace_init_count) + '/base_link',
                rospy.Time(0), rospy.Duration(10.0))
    elif len(namespace) == 0:
        tfLisn.waitForTransform(global_frame[1:], '/base_link',
                                rospy.Time(0), rospy.Duration(10.0))
    rospy.Subscriber(goals_topic, PointStamped, callback=callBack,
                     callback_args=[tfLisn, global_frame[1:]])
    pub = rospy.Publisher('frontiers', Marker, queue_size=10)
    pub2 = rospy.Publisher('centroids', Marker, queue_size=10)
    filterpub = rospy.Publisher('filtered_points', PointArray, queue_size=10)
    rospy.loginfo("the map and global costmaps are received")
    # wait if no frontier is received yet
    while len(frontiers) < 1:
        pass
    points = Marker()
    points_clust = Marker()
    # Set the frame ID and timestamp. See the TF tutorials for information on these.
    points.header.frame_id = mapData.header.frame_id
    points.header.stamp = rospy.Time.now()
    points.ns = "markers2"
    points.id = 0
    points.type = Marker.POINTS
    # Marker action for latched frontiers. Options are ADD, DELETE, and new
    # in ROS Indigo: 3 (DELETEALL)
    points.action = Marker.ADD
    points.pose.orientation.w = 1.0
    points.scale.x = 0.2
    points.scale.y = 0.2
    # Yellow, fully opaque (raw frontier points).
    points.color.r = 255.0 / 255.0
    points.color.g = 255.0 / 255.0
    points.color.b = 0.0 / 255.0
    points.color.a = 1
    points.lifetime = rospy.Duration()
    p = Point()  # scratch point reused when filling marker point lists
    p.z = 0
    pp = []
    pl = []
    points_clust.header.frame_id = mapData.header.frame_id
    points_clust.header.stamp = rospy.Time.now()
    points_clust.ns = "markers3"
    points_clust.id = 4
    points_clust.type = Marker.POINTS
    # Marker action for centroids. Options are ADD, DELETE, and new in ROS
    # Indigo: 3 (DELETEALL)
    points_clust.action = Marker.ADD
    points_clust.pose.orientation.w = 1.0
    points_clust.scale.x = 0.2
    points_clust.scale.y = 0.2
    # Green, fully opaque (cluster centroids).
    points_clust.color.r = 0.0 / 255.0
    points_clust.color.g = 255.0 / 255.0
    points_clust.color.b = 0.0 / 255.0
    points_clust.color.a = 1
    points_clust.lifetime = rospy.Duration()
    temppoint = PointStamped()  # scratch point for costmap-frame transforms
    temppoint.header.frame_id = mapData.header.frame_id
    temppoint.header.stamp = rospy.Time(0)
    temppoint.point.z = 0.0
    arraypoints = PointArray()
    tempPoint = Point()  # scratch point for the filtered_points output
    tempPoint.z = 0.0
    #-------------------------------------------------------------------------
    #---------------------     Main   Loop     -------------------------------
    #-------------------------------------------------------------------------
    while not rospy.is_shutdown():
        #-------------------------------------------------------------------------
        # Clustering frontier points
        centroids = []
        front = copy(frontiers)
        if len(front) > 1:
            ms = MeanShift(bandwidth=0.3)
            ms.fit(front)
            # centroids array is the centers of each cluster
            centroids = ms.cluster_centers_
        # if there is only one frontier no need for clustering, i.e. centroids=frontiers
        if len(front) == 1:
            centroids = front
        frontiers = copy(centroids)
        #-------------------------------------------------------------------------
        # clearing old frontiers
        z = 0
        while z < len(centroids):
            cond = False
            temppoint.point.x = centroids[z][0]
            temppoint.point.y = centroids[z][1]
            # A centroid is discarded if ANY robot's costmap marks it above
            # the clearing threshold...
            for i in range(0, n_robots):
                transformedPoint = tfLisn.transformPoint(
                    globalmaps[i].header.frame_id, temppoint)
                x = array([transformedPoint.point.x, transformedPoint.point.y])
                cond = (gridValue(globalmaps[i], x) > threshold) or cond
            # ...or if exploring it would yield too little information gain.
            if (cond or (informationGain(mapData, [centroids[z][0], centroids[z][1]], info_radius * 0.5)) < 0.2):
                centroids = delete(centroids, (z), axis=0)
                z = z - 1  # compensate for the removed row
            z += 1
        #-------------------------------------------------------------------------
        # publishing
        arraypoints.points = []
        for i in centroids:
            tempPoint.x = i[0]
            tempPoint.y = i[1]
            # copy() so the list does not alias the reused scratch point
            arraypoints.points.append(copy(tempPoint))
        filterpub.publish(arraypoints)
        pp = []
        for q in range(0, len(frontiers)):
            p.x = frontiers[q][0]
            p.y = frontiers[q][1]
            pp.append(copy(p))
        points.points = pp
        pp = []
        for q in range(0, len(centroids)):
            p.x = centroids[q][0]
            p.y = centroids[q][1]
            pp.append(copy(p))
        points_clust.points = pp
        pub.publish(points)
        pub2.publish(points_clust)
        rate.sleep()
def create_point(x=0, y=0, z=0):
    """Build a geometry_msgs Point at (x, y, z); coordinates default to 0."""
    point = Point()
    point.x, point.y, point.z = x, y, z
    return point
def pubTF(self, timer):
    """Timer callback: publish every simulated obstacle's pose, trajectory and markers.

    For each dynamic/static object in self.world this evaluates its
    analytic position expressions for "now", publishes a DynTraj message
    and a TF frame, and accumulates CUBE_LIST and mesh MarkerArray
    visualizations that are published once at the end.

    NOTE(review): the x/y/z expressions are built as strings and run
    through eval() — acceptable for self-generated trajectories, but never
    feed externally supplied strings through this path.
    """
    br = tf.TransformBroadcaster()
    # Template marker shared by the static and dynamic cube lists.
    marker_tmp=Marker();
    marker_tmp.header.frame_id="world"
    marker_tmp.type=marker_tmp.CUBE_LIST;
    marker_tmp.action=marker_tmp.ADD;
    marker_static=copy.deepcopy(marker_tmp);
    marker_dynamic=copy.deepcopy(marker_tmp);
    marker_dynamic.color=color_dynamic;
    # marker_dynamic.scale.x=self.world.bbox_dynamic[0]
    # marker_dynamic.scale.y=self.world.bbox_dynamic[1]
    # marker_dynamic.scale.z=self.world.bbox_dynamic[2]
    marker_static.color=color_static;
    marker_array_static_mesh=MarkerArray();
    marker_array_dynamic_mesh=MarkerArray();
    for i in range(self.world.num_of_dyn_objects + self.world.num_of_stat_objects):
        t_ros=rospy.Time.now()
        t=rospy.get_time();  # Same as before, but it's float
        dynamic_trajectory_msg=DynTraj();
        bbox_i=self.bboxes[i];
        s=self.world.scale;
        if(self.type[i]=="dynamic"):
            # Dynamic objects follow a trefoil-knot trajectory.
            [x_string, y_string, z_string] = self.trefoil(self.x_all[i], self.y_all[i], self.z_all[i], s,s,s, self.offset_all[i], self.slower[i])
            # print("self.bboxes[i]= ", self.bboxes[i])
            dynamic_trajectory_msg.bbox = bbox_i;
            marker_dynamic.scale.x=bbox_i[0]
            marker_dynamic.scale.y=bbox_i[1]
            marker_dynamic.scale.z=bbox_i[2]
        else:
            # [x_string, y_string, z_string] = self.static(self.x_all[i], self.y_all[i], self.z_all[i]);
            dynamic_trajectory_msg.bbox = bbox_i;
            marker_static.scale.x=bbox_i[0]
            marker_static.scale.y=bbox_i[1]
            marker_static.scale.z=bbox_i[2]
            # NOTE(review): both branches call wave_in_z with identical
            # arguments — static_vert vs other static types currently get
            # the same motion; confirm whether this was intended.
            if(self.type[i]=="static_vert"):
                [x_string, y_string, z_string] = self.wave_in_z(self.x_all[i], self.y_all[i], self.z_all[i], s, self.offset_all[i], 1.0)
            else:
                [x_string, y_string, z_string] = self.wave_in_z(self.x_all[i], self.y_all[i], self.z_all[i], s, self.offset_all[i], 1.0)
        # Evaluate the analytic expressions at the current time `t`.
        x = eval(x_string)
        y = eval(y_string)
        z = eval(z_string)
        dynamic_trajectory_msg.is_agent=False;
        dynamic_trajectory_msg.header.stamp= t_ros;
        dynamic_trajectory_msg.function = [x_string, y_string, z_string]
        dynamic_trajectory_msg.pos.x=x  # Current position
        dynamic_trajectory_msg.pos.y=y  # Current position
        dynamic_trajectory_msg.pos.z=z  # Current position
        dynamic_trajectory_msg.id = 4000+ i  # Current id 4000 to avoid interference with ids from agents  #TODO
        self.pubTraj.publish(dynamic_trajectory_msg)
        # Identity orientation; only the position moves.
        br.sendTransform((x, y, z), (0,0,0,1), t_ros, self.name+str(dynamic_trajectory_msg.id), "world")
        # If you want to move the objects in gazebo:
        # gazebo_state = ModelState()
        # gazebo_state.model_name = str(i)
        # gazebo_state.pose.position.x = x
        # gazebo_state.pose.position.y = y
        # gazebo_state.pose.position.z = z
        # gazebo_state.reference_frame = "world"
        # self.pubGazeboState.publish(gazebo_state)
        # If you want to see the objects in rviz:
        point=Point()
        point.x=x;
        point.y=y;
        point.z=z;
        # Per-object mesh marker at the current position.
        marker=Marker();
        marker.id=i;
        marker.ns="mesh";
        marker.header.frame_id="world"
        marker.type=marker.MESH_RESOURCE;
        marker.action=marker.ADD;
        marker.pose.position.x=x
        marker.pose.position.y=y
        marker.pose.position.z=z
        marker.pose.orientation.x=0.0;
        marker.pose.orientation.y=0.0;
        marker.pose.orientation.z=0.0;
        marker.pose.orientation.w=1.0;
        marker.lifetime = rospy.Duration.from_sec(0.0);  # never auto-expire
        marker.mesh_use_embedded_materials=True
        marker.mesh_resource=self.meshes[i]
        if(self.type[i]=="dynamic"):
            marker_dynamic.points.append(point);
            marker.scale.x=bbox_i[0];
            marker.scale.y=bbox_i[1];
            marker.scale.z=bbox_i[2];
            marker_array_dynamic_mesh.markers.append(marker);
        if(self.type[i]=="static_vert" or self.type[i]=="static_horiz"):
            marker.scale.x=bbox_i[0];
            marker.scale.y=bbox_i[1];
            marker.scale.z=bbox_i[2];
            marker_array_static_mesh.markers.append(marker);
            marker_static.points.append(point);
    self.pubShapes_dynamic_mesh.publish(marker_array_dynamic_mesh)
    self.pubShapes_dynamic.publish(marker_dynamic)
    # if(self.already_published_static_shapes==False):
    self.pubShapes_static_mesh.publish(marker_array_static_mesh)
    self.pubShapes_static.publish(marker_static)
def array_like_to_point(a):
    """Convert any indexable 3-sequence (list/tuple/array) into a Point."""
    pt = Point()
    pt.x, pt.y, pt.z = a[0], a[1], a[2]
    return pt
def createPoint_tuple(pt_tuple):
    """Build a geometry_msgs Point from the first three items of *pt_tuple*."""
    result = Point()
    result.x, result.y, result.z = pt_tuple[0], pt_tuple[1], pt_tuple[2]
    return result
def createPoint(x, y, z):
    """Pack the three coordinates into a geometry_msgs Point."""
    pt = Point()
    pt.x, pt.y, pt.z = x, y, z
    return pt
def detect_video(yolo, video_path, garbage_in_can, emergency_stop):
    """Detect a bottle and a person with YOLO, then track them with CSRT.

    Publishes the tracked target's camera-frame coordinates (Point), a
    bottle/person flag, tracking-state heartbeats, and annotated frames.

    Fix: in the human-tracking branch the z coordinate was written to the
    *bottle* point (`pts.z`) instead of the person point (`pts2.z`), so the
    published person position kept a stale/contaminated z. The zero-reset
    branch above it shows the intended `pts2.z` target.
    """
    from PIL import Image, ImageFont, ImageDraw
    #Start ROS node
    pub, pub_flag, pub_track, pub_frame1, pub_frame2 = start_node()
    vid = RealsenseCapture()
    vid.start()
    bridge = CvBridge()
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    worldy = 0.0
    while True:
        pub_track.publish(0)  # heartbeat: not currently tracking
        ret, frames, _ = vid.read()
        frame = frames[0]
        depth_frame = frames[1]
        image = Image.fromarray(frame)
        image, bottle, person, right, left, bottom, top, right2, left2, bottom2, top2 = yolo.detect_image(
            image, pub)
        result = np.asarray(image)
        # Rolling FPS counter over one-second windows.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50,
                    color=(255, 0, 0), thickness=2)
        cv2.imshow("result", result)
        yolo_frame = bridge.cv2_to_imgmsg(result, "bgr8")
        # yolo_frame = result
        pub_frame1.publish(yolo_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Need both detections before tracking can start.
        if (bottle == False) or (person == False):
            continue
        # ------------------------------Tracking-----------------------------------
        # tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
        # tracker_type = tracker_types[7]
        tracker = cv2.TrackerCSRT_create()
        tracker2 = cv2.TrackerCSRT_create()
        # setup initial location of window
        left, right, top, bottom = left, right, top, bottom
        r, h, ci, w = top, bottom - top, left, right - left  # simply hardcoded the values r, h, c, w
        # Per-channel reference histograms of the bottle ROI.
        frame_b, frame_g, frame_r = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]
        hist_b = cv2.calcHist([frame_b[top:bottom, left:right]], [0], None,
                              [256], [0, 256])
        hist_g = cv2.calcHist([frame_g[top:bottom, left:right]], [0], None,
                              [256], [0, 256])
        hist_r = cv2.calcHist([frame_r[top:bottom, left:right]], [0], None,
                              [256], [0, 256])
        cv2.normalize(hist_b, hist_b, 0, 255, cv2.NORM_MINMAX)
        cv2.normalize(hist_g, hist_g, 0, 255, cv2.NORM_MINMAX)
        cv2.normalize(hist_r, hist_r, 0, 255, cv2.NORM_MINMAX)
        track_window = (ci, r, w, h)
        # Per-channel reference histograms of the person ROI.
        r2, h2, ci2, w2 = top2, bottom2 - top2, left2, right2 - left2  # simply hardcoded the values r, h, c, w
        hist_bp = cv2.calcHist([frame_b[top2:bottom2, left2:right2]], [0],
                               None, [256], [0, 256])
        hist_gp = cv2.calcHist([frame_g[top2:bottom2, left2:right2]], [0],
                               None, [256], [0, 256])
        hist_rp = cv2.calcHist([frame_r[top2:bottom2, left2:right2]], [0],
                               None, [256], [0, 256])
        cv2.normalize(hist_bp, hist_bp, 0, 255, cv2.NORM_MINMAX)
        cv2.normalize(hist_gp, hist_gp, 0, 255, cv2.NORM_MINMAX)
        cv2.normalize(hist_rp, hist_rp, 0, 255, cv2.NORM_MINMAX)
        track_window2 = (ci2, r2, w2, h2)
        # print(left2, top2, right2-left2, bottom2-top2)
        cv2.imwrite('bottledetect.jpg', frame[r:r + h, ci:ci + w])
        cv2.imwrite('persondetect.jpg', frame[r2:r2 + h2, ci2:ci2 + w2])
        # set up the ROI for tracking
        roi = frame[r:r + h, ci:ci + w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
        # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        ok = tracker.init(frame, track_window)
        ok2 = tracker2.init(frame, track_window2)
        track_thing = 0  #bottle
        pts = Point()
        pts2 = Point()
        untrack = 0
        while (1):
            ret, frames, depth = vid.read()
            frame = frames[0]
            depth_frame = frames[1]
            if ret == True:
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
                # apply meanshift to get the new location
                ok, track_window = tracker.update(frame)
                x, y, w, h = track_window
                ok, track_window2 = tracker2.update(frame)
                x2, y2, w2, h2 = track_window2
                # Draw it on image (red box marks the actively tracked person).
                img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
                if not track_thing:
                    img2 = cv2.rectangle(img2, (x2, y2),
                                         (x2 + w2, y2 + h2), 255, 2)
                else:
                    img2 = cv2.rectangle(img2, (x2, y2),
                                         (x2 + w2, y2 + h2), (0, 0, 255), 2)
                cv2.imshow('Tracking', img2)
                tracking_frame = bridge.cv2_to_imgmsg(img2, "bgr8")
                # tracking_frame = img2
                pub_frame2.publish(tracking_frame)
                # https://www.intelrealsense.com/wp-content/uploads/2020/06/Intel-RealSense-D400-Series-Datasheet-June-2020.pdf
                # Average depth over a 3x3 patch at the bottle box centre.
                total, cnt = 0, 0
                for i in range(3):
                    for j in range(3):
                        dep = depth.get_distance(
                            np.maximum(0, np.minimum(i + x + w // 2, 639)),
                            np.maximum(0, np.minimum(j + y + h // 2, 479)))
                        if (dep) != 0:
                            total += dep
                            cnt += 1
                if cnt != 0:
                    worldz = total / cnt  # (x-w//2)
                    # keep some distance so we don't bump into the person
                    if (worldz < 1.0) or (worldz > 3.0):
                        worldz = 0
                else:
                    worldz = 0
                # Average depth over a 3x3 patch at the person box centre.
                total2, cnt2 = 0, 0
                for i in range(3):
                    for j in range(3):
                        dep2 = depth.get_distance(
                            np.maximum(0, np.minimum(i + x2 + w2 // 2, 639)),
                            np.maximum(0, np.minimum(j + y2 + h2 // 2, 479)))
                        if dep2 != 0:
                            total2 += dep2
                            cnt2 += 1
                if cnt2 != 0:
                    worldz2 = total2 / cnt2
                    if worldz2 < 1.0:
                        worldz2 = 0
                else:
                    worldz2 = 0
                # print('worldz', worldz)
                # print('worldz2', worldz2)
                if (worldz == 0) or (worldz2 == 0):
                    # break
                    # No valid depth: zero both published points.
                    worldx, worldy = 0, 0
                    worldx = 0
                    pts.x, pts.y, pts.z = 0.0, 0.0, 0.0
                    worldx2, worldy2 = 0, 0
                    pts2.x, pts2.y, pts2.z = 0.0, 0.0, 0.0
                else:
                    # focus length = 1.93mm, distance between depth cameras = about 5cm, a pixel size = 3um
                    if (track_thing == 0):  #bottle Tracking
                        u_ud = (0.05 * 1.88 * 10**(-3)) / (3 * 10**(-6) * worldz)
                        # print('u_ud', u_ud)
                        # print('x, y =', (x+w//2)-(img2.shape[1]//2), (img2.shape[0]//2)-(y+h//2))
                        # term (-0.3*u_ud) accounts for the physical offset
                        # between the depth and colour cameras; coordinates are
                        # relative to the left depth camera.
                        worldx = 0.05 * (x + w // 2 -
                                         (img2.shape[1] // 2) - 0.3 * u_ud) / u_ud
                        worldy = 0.05 * ((img2.shape[0] // 2) - (y + h)) / u_ud
                        print('x,y,z = ', worldx, worldy, worldz - 1.0)
                        pts.y, pts.z, pts.x = float(worldx), float(
                            worldy), float(worldz) - 1.0
                    else:  #human Tracking
                        u_ud = (0.05 * 1.88 * 10**(-3)) / (3 * 10**(-6) * worldz2)
                        worldx2 = 0.05 * (x2 + w2 // 2 -
                                          (img2.shape[1] // 2) - 0.3 * u_ud) / u_ud
                        worldy2 = 0.05 * ((img2.shape[0] // 2) - (y2 + h2)) / u_ud
                        print('x2,y2,z2 = ', worldx2, worldy2, worldz2 - 1.0)
                        # FIX: was `pts2.x, pts2.y, pts.z = ...` — the person's z
                        # leaked into the bottle point and pts2.z was never set.
                        # NOTE(review): axis order here (x,y,z) differs from the
                        # bottle branch's (y,z,x) remap — confirm which frame the
                        # consumer expects.
                        pts2.x, pts2.y, pts2.z = float(worldx2), float(
                            worldy2), float(worldz2) - 1.0
                print("track_thing = ", track_thing)
                # Re-histogram the current windows to detect appearance drift.
                frame_b, frame_g, frame_r = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]
                hist_b2 = cv2.calcHist([frame_b[y:y + h, x:x + w]], [0], None,
                                       [256], [0, 256])
                hist_g2 = cv2.calcHist([frame_g[y:y + h, x:x + w]], [0], None,
                                       [256], [0, 256])
                hist_r2 = cv2.calcHist([frame_r[y:y + h, x:x + w]], [0], None,
                                       [256], [0, 256])
                cv2.normalize(hist_b2, hist_b2, 0, 255, cv2.NORM_MINMAX)
                cv2.normalize(hist_g2, hist_g2, 0, 255, cv2.NORM_MINMAX)
                cv2.normalize(hist_r2, hist_r2, 0, 255, cv2.NORM_MINMAX)
                hist_bp2 = cv2.calcHist([frame_b[y2:y2 + h2, x2:x2 + w2]], [0],
                                        None, [256], [0, 256])
                hist_gp2 = cv2.calcHist([frame_g[y2:y2 + h2, x2:x2 + w2]], [0],
                                        None, [256], [0, 256])
                hist_rp2 = cv2.calcHist([frame_r[y2:y2 + h2, x2:x2 + w2]], [0],
                                        None, [256], [0, 256])
                cv2.normalize(hist_bp2, hist_bp2, 0, 255, cv2.NORM_MINMAX)
                cv2.normalize(hist_gp2, hist_gp2, 0, 255, cv2.NORM_MINMAX)
                cv2.normalize(hist_rp2, hist_rp2, 0, 255, cv2.NORM_MINMAX)
                comp_b = cv2.compareHist(hist_b, hist_b2, cv2.HISTCMP_CORREL)
                comp_g = cv2.compareHist(hist_g, hist_g2, cv2.HISTCMP_CORREL)
                comp_r = cv2.compareHist(hist_r, hist_r2, cv2.HISTCMP_CORREL)
                comp_bp = cv2.compareHist(hist_bp, hist_bp2, cv2.HISTCMP_CORREL)
                comp_gp = cv2.compareHist(hist_gp, hist_gp2, cv2.HISTCMP_CORREL)
                comp_rp = cv2.compareHist(hist_rp, hist_rp2, cv2.HISTCMP_CORREL)
                # print('compareHist(b)', comp_b)
                # print('compareHist(g)', comp_g)
                # print('compareHist(r)', comp_r)
                # print('compareHist(bp)', comp_bp)
                # print('compareHist(gp)', comp_gp)
                # print('compareHist(rp)', comp_rp)
                # print("garbage_in_can", garbage_in_can)
                # Stop tracking when: (bottle mode) the histograms drift or the
                # bottle/person box is lost; or (person mode) the histograms
                # drift, the box is lost, or the garbage went into the can.
                if ((track_thing == 0 and
                     ((comp_b <= 0.1) or (comp_g <= 0.1) or (comp_r <= 0.1)
                      or track_window == (0, 0, 0, 0)))
                        or (track_window2 == (0, 0, 0, 0))
                        or (track_thing == 1 and
                            ((comp_bp <= 0.) or (comp_gp <= 0.) or
                             (comp_rp <= 0.)))):
                    untrack += 1
                    print("untrack = ", untrack)
                    if untrack >= 30:
                        print("追跡が外れた!\n")
                        break
                elif (track_thing == 0 and (x + w > 640 or x < 0) and
                      (y + h > 480 or y < 0)) or (track_thing == 1 and
                                                  (x2 + w2 > 640 or x2 < 0) and
                                                  (y2 + h2 > 480 or y2 < 0)):
                    untrack += 1
                    print("untrack = ", untrack)
                    if untrack >= 50:
                        print("枠が画面外で固まった")
                        break
                elif (track_thing == 1 and garbage_in_can == 1):
                    print("ゴミを捨てたため追跡を終えます")
                    break
                else:
                    untrack = 0
                # Littering criterion: while tracking the bottle it either
                # dropped below ~10cm above ground, or the box vanished while
                # the last known height was below the camera.
                print('track_window = ', track_window)
                if (((worldy <= -0.10) or (track_window == (0, 0, 0, 0) and
                                           (worldy < 0.5))) and
                        (not track_thing)):
                    print("ポイ捨てした!\n")
                    track_thing = 1  #human
                if track_thing == 0:
                    tracking_point = pts
                    if not (pts.x == 0.0 and pts.y == 0.0 and pts.z == 0.0):
                        pub.publish(tracking_point)
                    flag = 0  #bottle
                else:
                    tracking_point = pts2
                    if not (pts2.x == 0.0 and pts2.y == 0.0 and pts2.z == 0.0):
                        pub.publish(tracking_point)
                    flag = 1  #person
                pub_flag.publish(flag)
                k = cv2.waitKey(1) & 0xff
                print("emergency_stop", emergency_stop)
                if (k == 27) or emergency_stop == 1:  # dev
                    # if emergency_stop:  # ops
                    print("program is stoped!")
                    sys.exit(0)
            else:
                break
        pub_track.publish(1)  # heartbeat: a tracking episode finished
    yolo.close_session()
def gnd_marker_pub(gnd_label, marker_pub, cfg, color="red"): length = int(cfg.grid_range[2] - cfg.grid_range[0]) # x direction width = int(cfg.grid_range[3] - cfg.grid_range[1]) # y direction gnd_marker = Marker() gnd_marker.header.frame_id = "/kitti/base_link" gnd_marker.header.stamp = rospy.Time.now() gnd_marker.type = gnd_marker.LINE_LIST gnd_marker.action = gnd_marker.ADD gnd_marker.scale.x = 0.05 gnd_marker.scale.y = 0.05 gnd_marker.scale.z = 0.05 if (color == "red"): gnd_marker.color.a = 1.0 gnd_marker.color.r = 1.0 gnd_marker.color.g = 0.0 gnd_marker.color.b = 0.0 if (color == "green"): gnd_marker.color.a = 1.0 gnd_marker.color.r = 0.0 gnd_marker.color.g = 1.0 gnd_marker.color.b = 0.0 gnd_marker.points = [] # gnd_labels are arranged in reverse order for j in range(gnd_label.shape[0]): for i in range(gnd_label.shape[1]): pt1 = Point() pt1.x = i + cfg.grid_range[0] pt1.y = j + cfg.grid_range[1] pt1.z = gnd_label[j, i] if j > 0: pt2 = Point() pt2.x = i + cfg.grid_range[0] pt2.y = j - 1 + cfg.grid_range[1] pt2.z = gnd_label[j - 1, i] gnd_marker.points.append(pt1) gnd_marker.points.append(pt2) if i > 0: pt2 = Point() pt2.x = i - 1 + cfg.grid_range[0] pt2.y = j + cfg.grid_range[1] pt2.z = gnd_label[j, i - 1] gnd_marker.points.append(pt1) gnd_marker.points.append(pt2) if j < width - 1: pt2 = Point() pt2.x = i + cfg.grid_range[0] pt2.y = j + 1 + cfg.grid_range[1] pt2.z = gnd_label[j + 1, i] gnd_marker.points.append(pt1) gnd_marker.points.append(pt2) if i < length - 1: pt2 = Point() pt2.x = i + 1 + cfg.grid_range[0] pt2.y = j + cfg.grid_range[1] pt2.z = gnd_label[j, i + 1] gnd_marker.points.append(pt1) gnd_marker.points.append(pt2) marker_pub.publish(gnd_marker)
def CompileTransform(self, x_index, y_index, h):
    """Look up the (x, y) entry of the transform grid and attach height h."""
    cell = self.transform_grid[x_index][y_index]
    result = Point()
    result.x = cell[0]
    result.y = cell[1]
    result.z = h
    return result
def polygonial():
    """Build and run the multi-drone SMACH mission.

    Constructs the top-level state machine sm0: per-drone take-off states,
    concurrent figure-eight and helix trajectory executions, a concurrent
    land-all fallback, and an infinite perimeter-patrol loop per drone.
    Relies on ROS/smach names imported elsewhere in this file (StateMachine,
    Concurrence, SimpleActionState, my_newAction/my_newGoal, doTrajAction/
    doTrajGoal, IntrospectionServer, smach, smach_ros, threading).
    """
    # Crazyflie ids for drone1..drone3.
    ids = [2, 5, 1]
    #define the differents points
    test_point = Point()
    test_point.x = 0
    test_point.y = 0
    test_point.z = 0.5

    # Home positions for the figure-eight routine.
    home_points_fo8 = [Point(), Point(), Point()]
    home_points_fo8[0].x = 0.0
    home_points_fo8[0].y = 0.0
    home_points_fo8[0].z = 0.3
    home_points_fo8[1].x = 0.3
    home_points_fo8[1].y = 0.0
    home_points_fo8[1].z = 0.3
    home_points_fo8[2].x = -0.3
    home_points_fo8[2].y = 0.0
    home_points_fo8[2].z = 0.3

    # Home positions for the helix routine.
    home_points_heli = [Point(), Point(), Point()]
    home_points_heli[0].x = 0.0
    home_points_heli[0].y = 0.0
    home_points_heli[0].z = 0.3
    home_points_heli[1].x = 0.0
    home_points_heli[1].y = 0.9 - 0.4
    home_points_heli[1].z = 0.9
    home_points_heli[2].x = -0.3
    home_points_heli[2].y = 0.0
    home_points_heli[2].z = 0.3

    # Eight patrol waypoints (rough octagon at z = 0.5).
    my_points = [
        Point(), Point(), Point(), Point(), Point(), Point(), Point(), Point()
    ]
    my_points[0].x = 0.5
    my_points[0].y = -0.5
    my_points[0].z = 0.5
    my_points[1].x = 0.7
    my_points[1].y = 0.0
    my_points[1].z = 0.5
    my_points[2].x = 0.5
    my_points[2].y = 0.5
    my_points[2].z = 0.5
    my_points[3].x = 0
    my_points[3].y = 0.7
    my_points[3].z = 0.5
    my_points[4].x = -0.5
    my_points[4].y = 0.5
    my_points[4].z = 0.5
    my_points[5].x = -0.7
    my_points[5].y = 0
    my_points[5].z = 0.5
    my_points[6].x = -0.5
    my_points[6].y = -0.5
    my_points[6].z = 0.5
    my_points[7].x = 0
    my_points[7].y = -0.7
    my_points[7].z = 0.5

    # Create a SMACH state machine
    sm0 = StateMachine(outcomes=['succeeded', 'aborted', 'preempted'])

    #progressively add drones
    with sm0:
        # Disabled drone1 helix sequence kept as a string for reference.
        """
        StateMachine.add('HELI_START',
                         SimpleActionState('drone1detect_perimeter', my_newAction,
                                           goal = my_newGoal(point = home_points_heli[0], id = ids[0] )),
                         transitions={'succeeded' : 'HELI_EXECUTE', 'aborted' : 'land_all', 'preempted' : 'land_all'})
        StateMachine.add('HELI_EXECUTE',
                         SimpleActionState('trajectory_action', doTrajAction,
                                           goal = doTrajGoal(shape = 1, id = ids[0] )),
                         transitions={'succeeded' : 'HELI_END', 'aborted' : 'land_all', 'preempted' : 'land_all'})
        StateMachine.add('HELI_END',
                         SimpleActionState('land_drone1', my_newAction,
                                           goal = my_newGoal(point = my_points[3], id = ids[0]) ),
                         transitions={'succeeded' : 'drone2-FIG8', 'aborted' : 'land_all', 'preempted' : 'land_all'})
        """
        # Send drones 2 and 3 to their figure-eight start points.
        StateMachine.add('drone2-FIG8',
                         SimpleActionState('drone2detect_perimeter',
                                           my_newAction,
                                           goal=my_newGoal(
                                               point=home_points_fo8[2],
                                               id=ids[1])),
                         transitions={
                             'succeeded': 'drone3-FIG8',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        StateMachine.add('drone3-FIG8',
                         SimpleActionState('drone3detect_perimeter',
                                           my_newAction,
                                           goal=my_newGoal(
                                               point=home_points_fo8[1],
                                               id=ids[2])),
                         transitions={
                             'succeeded': 'FIG8_EXECUTE',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        # fig8_sm = Concurrence(['drone2-2', 'land_all'], 'land_all',
        #                       outcome_map={'drone2-2' : {'FIG8_EXECUTE_drone3' : 'succeeded', 'FIG8_EXECUTE_drone2' : 'succeeded'}})
        # Both drones fly the figure-eight at the same time.
        fig8_sm = Concurrence(['succeeded', 'aborted', 'preempted'],
                              'succeeded')  #,
        #outcome_map={'succeeded' : 'drone2-2', 'preempted' : 'land_all', 'aborted' : 'land_all' })
        StateMachine.add('FIG8_EXECUTE',
                         fig8_sm,
                         transitions={
                             'succeeded': 'drone2-2',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        ## Bug BBL: DO NOT try to decide on outcomes in a concurrence, do it in the SM.add!!
        with fig8_sm:
            Concurrence.add(
                'FIG8_EXECUTE_drone2',
                SimpleActionState('fig8_drone2',
                                  doTrajAction,
                                  goal=doTrajGoal(shape=8, id=ids[1])))
            Concurrence.add(
                'FIG8_EXECUTE_drone3',
                SimpleActionState('fig8_drone3',
                                  doTrajAction,
                                  goal=doTrajGoal(shape=8, id=ids[2])))
        # Concurrent helix execution for drones 2 and 3.
        # NOTE(review): nothing transitions into 'HELI_EXECUTE' while the
        # drone1 helix sequence above stays commented out.
        heli_sm = Concurrence(['succeeded', 'aborted', 'preempted'],
                              'succeeded')  #,
        #outcome_map={'succeeded' : 'drone2-2', 'preempted' : 'land_all', 'aborted' : 'land_all' })
        StateMachine.add('HELI_EXECUTE',
                         heli_sm,
                         transitions={
                             'succeeded': 'drone2-2',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        ## Bug BBL: DO NOT try to decide on outcomes in a concurrence, do it in the SM.add!!
        with heli_sm:
            Concurrence.add(
                'HELI_EXECUTE_drone2',
                SimpleActionState('heli_drone2',
                                  doTrajAction,
                                  goal=doTrajGoal(shape=0, id=ids[1])))
            Concurrence.add(
                'HELI_EXECUTE_drone3',
                SimpleActionState('heli_drone3',
                                  doTrajAction,
                                  goal=doTrajGoal(shape=0, id=ids[2])))
        # Concurrent landing after the figure-eight.
        fig8_end = Concurrence(['succeeded', 'aborted', 'preempted'],
                               'succeeded')  #,
        #outcome_map={'succeeded' : 'drone2-2', 'preempted' : 'land_all', 'aborted' : 'land_all' })
        StateMachine.add('FIG8_END',
                         fig8_end,
                         transitions={
                             'succeeded': 'drone2-2',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        ## Bug BBL: DO NOT try to decide on outcomes in a concurrence, do it in the SM.add!!
        with fig8_end:
            Concurrence.add(
                'FIG8_END_drone2',
                SimpleActionState('land_drone2',
                                  my_newAction,
                                  goal=my_newGoal(point=my_points[3],
                                                  id=ids[1])))
            Concurrence.add(
                'FIG8_END_drone3',
                SimpleActionState('land_drone3',
                                  my_newAction,
                                  goal=my_newGoal(point=my_points[3],
                                                  id=ids[2])))
        # Stagger the three drones onto the patrol octagon.
        StateMachine.add('drone2-2',
                         SimpleActionState('drone2detect_perimeter',
                                           my_newAction,
                                           goal=my_newGoal(point=my_points[2],
                                                           id=ids[1])),
                         transitions={
                             'succeeded': 'drone3-1',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        StateMachine.add('drone3-1',
                         SimpleActionState('drone3detect_perimeter',
                                           my_newAction,
                                           goal=my_newGoal(point=my_points[0],
                                                           id=ids[2])),
                         transitions={
                             'succeeded': 'drone1-3',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        StateMachine.add('drone1-3',
                         SimpleActionState('drone1detect_perimeter',
                                           my_newAction,
                                           goal=my_newGoal(point=my_points[4],
                                                           id=ids[0])),
                         transitions={
                             'succeeded': 'infinit_loop',
                             'aborted': 'land_all',
                             'preempted': 'land_all'
                         })
        # Fallback: land all three drones concurrently.
        land_sm = Concurrence(['succeeded', 'aborted', 'preempted'],
                              'succeeded')
        StateMachine.add('land_all', land_sm)
        with land_sm:
            # DUPLICATA FOR HIGH LEVEL
            # #Land Drone If Aborted
            Concurrence.add(
                'LAND_DRONE1',
                SimpleActionState('land_drone1',
                                  my_newAction,
                                  goal=my_newGoal(point=my_points[3],
                                                  id=ids[0])))
            # #Land Drone If Aborted
            Concurrence.add(
                'LAND_DRONE2',
                SimpleActionState('land_drone2',
                                  my_newAction,
                                  goal=my_newGoal(point=my_points[3],
                                                  id=ids[1])))
            # #Land Drone If Aborted
            Concurrence.add(
                'LAND_DRONE3',
                SimpleActionState('land_drone3',
                                  my_newAction,
                                  goal=my_newGoal(point=my_points[3],
                                                  id=ids[2])))
        ############################################
        # Endless patrol: each drone loops over the octagon concurrently.
        sm1 = Concurrence(
            ['succeeded', 'aborted', 'preempted'],
            'succeeded',
            #child_termination_cb = lambda so: True,
            #outcome_map = {
            #'succeeded':{'WAIT_FOR_CLEAR':'valid'},
            #'aborted':{'DRONE1':'aborted'}}
        )
        StateMachine.add('infinit_loop', sm1)
        with sm1:
            drone1 = StateMachine(
                outcomes=['succeeded', 'aborted', 'preempted'])
            # ['succeeded','aborted','preempted']
            # test_drone is built but never added to sm1 (see commented add).
            # NOTE(review): its transitions reference 'LAND_DRONE1', which is
            # never added to test_drone itself — harmless only while unused.
            test_drone = StateMachine(
                outcomes=['succeeded', 'aborted', 'preempted'])
            # ['succeeded','aborted','preempted']
            #Concurrence.add('DRONE1', test_drone)
            with test_drone:
                # Vertical bobbing pattern over test_point: climb for the
                # first five states, descend afterwards.
                order = (5, 6, 7, 0, 1, 2, 3, 4)
                for i in range(7):
                    point_for_state = Point()
                    point_for_state.x = test_point.x
                    point_for_state.y = test_point.y
                    if i <= 4:
                        point_for_state.z = test_point.z + (i * 0.1)
                    else:
                        point_for_state.z = test_point.z - (i * 0.1) + 0.8
                    StateMachine.add('DRONE1-' + str(order[i]),
                                     SimpleActionState(
                                         'drone1detect_perimeter',
                                         my_newAction,
                                         goal=my_newGoal(point=point_for_state,
                                                         id=ids[0])),
                                     transitions={
                                         'succeeded':
                                         'DRONE1-' + str(order[i + 1]),
                                         'aborted': 'LAND_DRONE1',
                                         'preempted': 'LAND_DRONE1'
                                     })
                #make it infinit
                smach.StateMachine.add(
                    'DRONE1-' + str(order[-1]),
                    SimpleActionState('drone1detect_perimeter',
                                      my_newAction,
                                      goal=my_newGoal(point=test_point,
                                                      id=ids[0])),
                    transitions={
                        'succeeded': 'DRONE1-' + str(order[0]),
                        'aborted': 'LAND_DRONE1',
                        'preempted': 'LAND_DRONE1'
                    })
            Concurrence.add('DRONE1', drone1)
            # Open the container
            with drone1:
                #add each state
                # drone1 starts at waypoint 5 and cycles the octagon forever.
                order = (5, 6, 7, 0, 1, 2, 3, 4)
                for i in range(7):
                    point_for_state = Point()
                    point_for_state.x = my_points[order[i]].x
                    point_for_state.y = my_points[order[i]].y
                    if i <= 4:
                        point_for_state.z = my_points[order[i]].z + (i * 0.1)
                    else:
                        point_for_state.z = my_points[order[i]].z - (i * 0.1) + 0.8
                    StateMachine.add('DRONE1-' + str(order[i]),
                                     SimpleActionState(
                                         'drone1detect_perimeter',
                                         my_newAction,
                                         goal=my_newGoal(point=point_for_state,
                                                         id=ids[0])),
                                     transitions={
                                         'succeeded':
                                         'DRONE1-' + str(order[i + 1]),
                                         'aborted': 'LAND_DRONE1',
                                         'preempted': 'LAND_DRONE1'
                                     })
                #make it infinit
                smach.StateMachine.add(
                    'DRONE1-' + str(order[-1]),
                    SimpleActionState('drone1detect_perimeter',
                                      my_newAction,
                                      goal=my_newGoal(
                                          point=my_points[order[-1]],
                                          id=ids[0])),
                    transitions={
                        'succeeded': 'DRONE1-' + str(order[0]),
                        'aborted': 'LAND_DRONE1',
                        'preempted': 'LAND_DRONE1'
                    })
                # #Land Drone If Aborted
                smach.StateMachine.add(
                    'LAND_DRONE1',
                    SimpleActionState('land_drone1',
                                      my_newAction,
                                      goal=my_newGoal(point=my_points[3],
                                                      id=ids[0])),
                    transitions={'succeeded': 'LAND_DRONE1'})
            drone2 = StateMachine(
                outcomes=['succeeded', 'aborted', 'preempted'])
            # ['succeeded','aborted','preempted']
            Concurrence.add('DRONE2', drone2)
            # Open the container
            with drone2:
                #add each state
                # drone2 starts at waypoint 3, phase-shifted from drone1.
                order = (3, 4, 5, 6, 7, 0, 1, 2)
                for i in range(7):
                    point_for_state = Point()
                    point_for_state.x = my_points[order[i]].x
                    point_for_state.y = my_points[order[i]].y
                    if i <= 4:
                        point_for_state.z = my_points[order[i]].z + (i * 0.1)
                    else:
                        point_for_state.z = my_points[order[i]].z - (i * 0.1) + 0.8
                    StateMachine.add('DRONE2-' + str(order[i]),
                                     SimpleActionState(
                                         'drone2detect_perimeter',
                                         my_newAction,
                                         goal=my_newGoal(point=point_for_state,
                                                         id=ids[1])),
                                     transitions={
                                         'succeeded':
                                         'DRONE2-' + str(order[i + 1]),
                                         'aborted': 'LAND_DRONE2',
                                         'preempted': 'LAND_DRONE2'
                                     })
                #make it infinit
                smach.StateMachine.add(
                    'DRONE2-' + str(order[-1]),
                    SimpleActionState('drone2detect_perimeter',
                                      my_newAction,
                                      goal=my_newGoal(
                                          point=my_points[order[-1]],
                                          id=ids[1])),
                    transitions={
                        'succeeded': 'DRONE2-' + str(order[0]),
                        'aborted': 'LAND_DRONE2',
                        'preempted': 'LAND_DRONE2'
                    })
                # #Land Drone If Aborted
                smach.StateMachine.add(
                    'LAND_DRONE2',
                    SimpleActionState('land_drone2',
                                      my_newAction,
                                      goal=my_newGoal(point=my_points[3],
                                                      id=ids[1])),
                    transitions={'succeeded': 'LAND_DRONE2'})
            drone3 = StateMachine(
                outcomes=['succeeded', 'aborted', 'preempted'])
            # ['succeeded','aborted','preempted']
            # NOTE(review): label 'DRONE3-' has a trailing hyphen, unlike
            # 'DRONE1'/'DRONE2' — confirm whether intentional.
            Concurrence.add('DRONE3-', drone3)
            # Open the container
            with drone3:
                #add each state
                # drone3 starts at waypoint 1, phase-shifted from the others.
                order = (1, 2, 3, 4, 5, 6, 7, 0)
                for i in range(7):
                    point_for_state = Point()
                    point_for_state.x = my_points[order[i]].x
                    point_for_state.y = my_points[order[i]].y
                    if i <= 4:
                        point_for_state.z = my_points[order[i]].z + (i * 0.1)
                    else:
                        point_for_state.z = my_points[order[i]].z - (i * 0.1) + 0.8
                    StateMachine.add('DRONE3-' + str(order[i]),
                                     SimpleActionState(
                                         'drone3detect_perimeter',
                                         my_newAction,
                                         goal=my_newGoal(point=point_for_state,
                                                         id=ids[2])),
                                     transitions={
                                         'succeeded':
                                         'DRONE3-' + str(order[i + 1]),
                                         'aborted': 'LAND_DRONE3',
                                         'preempted': 'LAND_DRONE3'
                                     })
                #make it infinit
                smach.StateMachine.add(
                    'DRONE3-' + str(order[-1]),
                    SimpleActionState('drone3detect_perimeter',
                                      my_newAction,
                                      goal=my_newGoal(
                                          point=my_points[order[-1]],
                                          id=ids[2])),
                    transitions={
                        'succeeded': 'DRONE3-' + str(order[0]),
                        'aborted': 'LAND_DRONE3',
                        'preempted': 'LAND_DRONE3'
                    })
                # #Land Drone If Aborted
                smach.StateMachine.add(
                    'LAND_DRONE3',
                    SimpleActionState('land_drone3',
                                      my_newAction,
                                      goal=my_newGoal(point=my_points[3],
                                                      id=ids[2])),
                    transitions={'succeeded': 'LAND_DRONE3'})

    # Attach a SMACH introspection server
    sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
    sis.start()
    # Set preempt handler
    smach_ros.set_preempt_handler(sm0)
    # Execute SMACH tree in a separate thread so that we can ctrl-c the script
    smach_thread = threading.Thread(target=sm0.execute)
    smach_thread.start()
def callback(self, PointCloud2):
    """LiDAR callback: accumulate road points, RANSAC-fit lane lines, publish markers.

    Accumulates transformed road points into pc_stack; every `frame_stack`
    frames it fits per-strip RANSAC lines in the base_link frame, publishes
    CYLINDER lane markers, one CUBE road-plane marker, and the stacked cloud.
    NOTE: the parameter shadows the sensor_msgs PointCloud2 *type* — inside
    this method the name refers to the received message.
    """
    global pc_stack, frame_stack, Lane_markers_array, Plane_markers_array, id_lane_global, id_plane_global, weight_past
    pc_np = get_xyzi_points(pointcloud2_to_array(PointCloud2))
    xyz_points = pc_np[:, :3]
    intensity = pc_np[:, 3]
    road_pts = extract_points(pc_np)
    odom_mat = get_odom(self.tf, "velo_link", "map")
    if odom_mat is not None:
        # Stack road points in the map frame across frames.
        points = get_transformation(odom_mat, road_pts)
        pc_stack = np.append(pc_stack, points, axis=0)
    if (PointCloud2.header.seq > 0) and (PointCloud2.header.seq % frame_stack == 0):
        # print("pub : ", PointCloud2.header.seq)
        # Flatten the stack; remember the mean height for marker placement.
        height = pc_stack[..., 2].mean() - 0.5
        pc_stack[..., 2] = 0
        base_mat = get_odom(self.tf, "map", "base_link")
        map_mat = get_odom(self.tf, "base_link", "map")
        if base_mat is not None:
            basepoints = get_transformation(base_mat, pc_stack)
            grid_y = [i for i in basepoints[:, 1]]
            grid_y_start = min(grid_y)
            grid_y_end = max(grid_y)
            interval = 1.2  # strip width along y (metres)
            # Extremes of all fitted lines; seed with sentinel values.
            lane1x = sys.maxsize
            lane1y = sys.maxsize
            lane2x = -sys.maxsize - 1
            lane2y = -sys.maxsize - 1
            atan_theta = 0
            len_line = 0
            atan_theta2 = 0
            len_line2 = 0
            # Sweep y-strips of width `interval`, fitting one line per strip.
            while grid_y_start < grid_y_end:
                xgrid = []
                ygrid = []
                grid = []
                for j in range(len(grid_y)):
                    if basepoints[:, 1][j] >= grid_y_start and basepoints[:, 1][
                            j] < grid_y_start + interval:
                        xgrid.append(basepoints[:, 0][j])
                        ygrid.append(basepoints[:, 1][j])
                # Skip sparse strips.
                if len(ygrid) < 25:
                    grid_y_start = grid_y_start + interval
                    if grid_y_start > grid_y_end:
                        break
                    continue
                temp = pd.DataFrame({'X': xgrid, 'Y': ygrid})
                grid = temp.as_matrix()
                ransac = linear_model.RANSACRegressor(
                    linear_model.LinearRegression(),
                    max_trials=100,
                    min_samples=None,
                    residual_threshold=0.05)

                def add_square_feature(X):
                    # Identity for now; quadratic feature kept for reference.
                    # X = np.concatenate([(X**2).reshape(-1,1), X], axis=1)
                    return X

                sub_cluster_df = grid
                Xpoints = sub_cluster_df[..., 0]
                Xpoints = Xpoints.reshape(-1, 1)
                Ypoints = sub_cluster_df[..., 1]
                Ypoints = Ypoints.reshape(-1, 1)
                ransac.fit(add_square_feature(Xpoints), Ypoints)
                inlier_mask = ransac.inlier_mask_
                line_X = np.arange(Xpoints.min(), Xpoints.max())[:, np.newaxis]
                line_y_ransac = ransac.predict(add_square_feature(line_X))
                # Keep the slope of the longest line (base_link frame).
                if len_line2 < (line_X.max() - line_X.min()):
                    atan_theta2 = math.atan(
                        (line_y_ransac.max() - line_y_ransac.min()) /
                        (line_X.max() - line_X.min()))
                    len_line2 = line_X.max() - line_X.min()
                line_X = line_X.reshape(-1)
                line_y_ransac = line_y_ransac.reshape(-1)
                line_z = [0 for _ in range(len(line_X))]
                line_tmp = pd.DataFrame({
                    'X': line_X,
                    'Y': line_y_ransac,
                    'Z': line_z
                })
                line_ransac = line_tmp.as_matrix()
                # Transform the fitted line back into the map frame.
                if map_mat is not None:
                    line_ransac = get_transformation(map_mat, line_ransac)
                    line_X = line_ransac[:, 0].reshape(-1, 1)
                    line_y_ransac = line_ransac[:, 1].reshape(-1, 1)
                # Grow the bounding rectangle of all lines (map frame).
                if line_X.min() < lane1x:
                    lane1x = line_X.min()
                if line_y_ransac.min() < lane1y:
                    lane1y = line_y_ransac.min()
                if line_X.max() > lane2x:
                    lane2x = line_X.max()
                if line_y_ransac.max() > lane2y:
                    lane2y = line_y_ransac.max()
                # Keep the slope of the longest line (map frame).
                if len_line < (line_X.max() - line_X.min()):
                    atan_theta = math.atan(
                        (line_y_ransac.max() - line_y_ransac.min()) /
                        (line_X.max() - line_X.min()))
                    len_line = line_X.max() - line_X.min()
                #print("theta: ",atan_theta2)
                # Publish only near-longitudinal lines (slope < 0.1 rad).
                if atan_theta2 < 0.1:
                    quaternion = tf2.transformations.quaternion_from_euler(
                        0, np.pi / 2, atan_theta)
                    # if line_theta < 0.3 and line_theta >0.05:
                    Lane_marker = Marker(
                        type=Marker.CYLINDER,
                        id=id_lane_global,
                        lifetime=rospy.Duration(300),
                        pose=Pose(
                            Point(0.0, 0.0, 0),
                            Quaternion(quaternion[0], quaternion[1],
                                       quaternion[2], quaternion[3])),
                        scale=Vector3(0.01, 0.5,
                                      line_X.max() - line_X.min()),  # line width
                        header=PointCloud2.header,
                        color=ColorRGBA(1.0, 1.0, 1.0, 1.0))
                    Lane_marker.header.frame_id = "/map"
                    # Place the cylinder at the line's median point.
                    l_points = Point()
                    l_points.x = np.median(line_X)
                    l_points.y = np.median(line_y_ransac)
                    l_points.z = height
                    Lane_marker.pose.position = l_points
                    id_lane_global += 1
                    Lane_markers_array.markers.append(Lane_marker)
                grid_y_start = grid_y_start + interval
                if grid_y_start > grid_y_end:
                    break
            # CUBE marker spanning the bounding rectangle of all lanes.
            rect_point1 = Point(lane1x, lane1y, 0)
            rect_point2 = Point(lane2x, lane2y, 0)
            quaternion = tf2.transformations.quaternion_from_euler(
                0, 0, atan_theta)
            Plane_marker = Marker(
                type=Marker.CUBE,
                header=PointCloud2.header,
                action=Marker.ADD,
                id=id_plane_global,
                scale=Vector3(np.fabs(rect_point1.x - rect_point2.x),
                              np.fabs(rect_point1.y - rect_point2.y),
                              np.fabs(rect_point1.z - rect_point2.z)),
                color=ColorRGBA(0.0, 0.0, 0.0, 1.0),
                pose=Pose(
                    Point((rect_point1.x - rect_point2.x) / 2.0 + rect_point2.x,
                          (rect_point1.y - rect_point2.y) / 2.0 + rect_point2.y,
                          height - 0.2),
                    Quaternion(quaternion[0], quaternion[1], quaternion[2],
                               quaternion[3])),
                lifetime=rospy.Duration(300))
            id_plane_global += 1
            Plane_marker.header.frame_id = "/map"
            Plane_markers_array.markers.append(Plane_marker)
        # Publish the stacked cloud and reset the accumulator.
        # NOTE(review): placed at the frame_stack-trigger level, so the stack
        # is published/cleared even when base_mat is None — confirm intended.
        PointCloud2.header.frame_id = "/map"
        point_pc2 = xyzarray_to_pc2(pc_stack, PointCloud2)
        pc_stack = np.empty((0, 3), float)
        self.pub_Lane_marker.publish(Lane_markers_array)
        self.pub_Plane_marker.publish(Plane_markers_array)
        self.lidar_pub.publish(point_pc2)
def init():
    """Initialise the Sawyer arm, gripper, camera subscriber and the fixed
    poses used by the demo.

    Fix: rospy.init_node() may be called at most once per process; the
    original called it a second time ('tags'), which raises
    rospy.exceptions.ROSException at runtime. The AprilTag subscriber is now
    registered on the already-initialised node.
    """
    global g_limb, g_orientation_hand_down, g_position_neutral, pos, posp, gripper
    global marker_p, marker_q
    global square_p, square_q
    global llc_p, llc_q
    global prev_pose, rotation
    global subscriber_pose
    global cam_pos
    #Set up arm stuff
    rospy.init_node('cairo_sawyer_ik_example')
    g_limb = intera_interface.Limb('right')
    subscriber_pose = rospy.Subscriber('/robot/limb/right/endpoint_state',
                                       Pose, callback_subscriber_pose)
    rospy.sleep(2)  # give the endpoint subscriber time to receive data
    gripper = intera_interface.Gripper()
    #set up camera control (on the same node — a second rospy.init_node
    #call would raise ROSException)
    rospy.Subscriber('/tag_detections', AprilTagDetectionArray,
                     callback_cam_pos)
    # Straight down and 'neutral' position
    g_orientation_hand_down = Quaternion()
    g_orientation_hand_down.x = 0.704238785359
    g_orientation_hand_down.y = 0.709956638597
    g_orientation_hand_down.z = -0.00229009932359
    g_orientation_hand_down.w = 0.00201493272073
    g_position_neutral = Point()
    g_position_neutral.x = 0.45371551183
    g_position_neutral.y = 0.0663097073071
    g_position_neutral.z = 0.0271459370863
    prev_pose = Pose()
    prev_pose.orientation = g_orientation_hand_down
    prev_pose.position = g_position_neutral
    #Marker position (same straight-down orientation as neutral)
    marker_q = Quaternion()
    marker_q.x = 0.704238785359
    marker_q.y = 0.709956638597
    marker_q.z = -0.00229009932359
    marker_q.w = 0.00201493272073
    marker_p = Point()
    #these ducking positions better be meters or im out
    # NOTE(review): `cam` is read here but only `cam_pos` is declared global
    # above — confirm `cam` is a module-level camera-data object.
    marker_p.x = g_position_neutral.x + (cam.base.x - cam.marker.x)
    marker_p.y = g_position_neutral.y + (cam.base.y - cam.marker.y)  #plz test
    marker_p.z = 0.00  # marker 0.0125670410943
    #Square position
    square_q = Quaternion()
    square_q.x = 0.704238785359
    square_q.y = 0.709956638597
    square_q.z = -0.00229009932359
    square_q.w = 0.00201493272073
    square_p = Point()
    square_p.x = 0.53430244888
    square_p.y = -0.152176453277
    square_p.z = 0.1125670410943
    #Lower Left Corner of board
    llc_q = Quaternion()
    llc_q.x = 0.70423878535
    llc_q.y = 0.709956638597
    llc_q.z = -0.00229009932359
    llc_q.w = 0.00201493272073
    llc_p = Point()
    llc_p.x = 0.487424263171
    llc_p.y = -0.10
    llc_p.z = -0.05
def camera_callback(self, data):
    """Detect the yellow line in the camera image and publish its centroid.

    Publishes a Point whose x/y are the centroid's offset from the image
    centre (camera pixel coordinates); z keeps its default of 0.
    NOTE: integer division semantics here rely on Python 2 (the file uses
    `print "..."` elsewhere) — under Python 3 the float slice indices below
    would raise TypeError.
    """
    try:
        # We select bgr8 because its the OpneCV encoding by default
        cv_image = self.bridge_object.imgmsg_to_cv2(
            data, desired_encoding="bgr8")
    except CvBridgeError as e:
        # NOTE(review): execution continues after a conversion failure, so
        # cv_image would be undefined below — confirm whether a return is
        # intended here.
        print(e)

    # We get image dimensions and crop the parts of the image we don't need
    # Bear in mind that because the first value of the image matrix is start and second value is down limit.
    # Select the limits so that it gets the line not too close and not too far, and the minimum portion possible
    # To make process faster.
    height, width, channels = cv_image.shape
    rows_to_watch = 100
    top_trunc = 1 * height / 2  #get 3/4 of the height from the top section of the image
    bot_trunc = top_trunc + rows_to_watch  #next set of rows to be used
    crop_img = cv_image[top_trunc:bot_trunc, 0:width]

    # Convert from RGB to HSV
    hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV).astype(np.float)

    # Define the Yellow Colour in HSV
    #RGB
    #[[[222,255,0]]]
    #BGR
    #[[[0,255,222]]]
    """
    To know which color to track in HSV, put in BGR. Use ColorZilla to get the color registered by the camera
    >>> yellow = np.uint8([[[B,G,R ]]])
    >>> hsv_yellow = cv2.cvtColor(yellow,cv2.COLOR_BGR2HSV)
    >>> print( hsv_yellow )
    [[[ 34 255 255]]
    """
    lower_yellow = np.array([20, 100, 100])
    upper_yellow = np.array([50, 255, 255])

    # Threshold the HSV image to get only yellow colors
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)

    # Calculate centroid of the blob of binary image using ImageMoments
    m = cv2.moments(mask, False)
    try:
        cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
    except ZeroDivisionError:
        # No yellow pixels found: fall back to the image centre so the
        # published offset is zero.
        cy, cx = height / 2, width / 2

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(crop_img, crop_img, mask=mask)

    # Draw the centroid in the resultut image
    # cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]])
    cv2.circle(res, (int(cx), int(cy)), 10, (0, 0, 255), -1)

    cv2.imshow("Original", cv_image)
    cv2.imshow("HSV", hsv)
    cv2.imshow("MASK", mask)
    cv2.imshow("RES", res)
    cv2.waitKey(1)

    # Camera coordinates
    p = Point()
    p.x = cx - width / 2
    p.y = cy - height / 2
    self.pub.publish(p)
def addScaledPoint(x, y): pointToAdd = Point() pointToAdd.x = (x * resolution) + offsetX + (.5 * resolution) pointToAdd.y = (y * resolution) + offsetY + (.5 * resolution) pointToAdd.z = 0 return pointToAdd
def tfPositionToGeometry(position): result = Point() result.x = position[0] result.y = position[1] result.z = position[2] return result
from tf.transformations import euler_from_quaternion
import math
from math import atan2

# Current robot pose, updated by odom_callback.
x = 1
y = 1
z = 0
theta = 0

goal = Point()
vel = Twist()

# PID gains (only kp is meaningful for this simple bang-bang controller).
kp = 1
kd = 0
ki = 0

end_point = Point()
end_point.x = 6
end_point.y = 6

final_path = valuelist()


def path_callback(data):
    """Store the latest planned path (x/y value lists) in the global."""
    global final_path
    final_path.valuex = data.valuex
    final_path.valuey = data.valuey


def odom_callback(data):
    """Update the global pose (x, y, theta) from an /odom message."""
    global x, y, theta
    x = data.pose.pose.position.x
    y = data.pose.pose.position.y
    # Bug fix: the quaternion was bound as `rot_tur` but read as `rot_q`,
    # which raised NameError on the first odometry message.
    rot_q = data.pose.pose.orientation
    (roll, pitch, theta) = euler_from_quaternion(
        [rot_q.x, rot_q.y, rot_q.z, rot_q.w])


rospy.init_node("speed_controller")
# Bug fix: the subscriber referenced an undefined callback name `newOdom`;
# the callback defined above is `odom_callback`.
subscriber = rospy.Subscriber("/odom", Odometry, odom_callback)
publisher = rospy.Publisher("/cmd_vel", Twist, queue_size=1)

speed = Twist()
r = rospy.Rate(4)

goal = Point()
# float(): input() returns a string on Python 3 (harmless on Python 2).
goal.x = float(input("set the x"))
goal.y = float(input("set the y"))

while not rospy.is_shutdown():
    inc_x = goal.x - x
    inc_y = goal.y - y
    angle_to_goal = atan2(inc_y, inc_x)
    # Rotate in place until roughly facing the goal, then drive straight.
    if abs(angle_to_goal - theta) > 0.1:
        speed.linear.x = 0.0
        speed.angular.z = 0.3
    else:
        speed.linear.x = 0.5
        speed.angular.z = 0.0
    publisher.publish(speed)
    r.sleep()
def callback_camera2(self, ros_data):
    """Centre the detected ball in the camera image by moving the NAO head.

    Thresholds the frame for the configured colour range, takes the largest
    blob, and publishes incremental HeadYaw/HeadPitch commands until the
    centroid sits near pixel (160, 120).  After >10 consecutive centred
    frames it estimates the ball position (X, Y, Z) in the robot frame from
    the apparent radius and current HeadPitch, publishes it as a Point and
    unsubscribes from the image topic.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the centring logic is placed inside `len(cnts) > 0` because it
    reads `center` and `radius`, which only exist there — confirm against
    the original file.
    """
    print "callback camera 2: centrado de pelota en la imagen"
    # Use the bridge to convert the ROS Image message to OpenCV message
    cv_image = self.bridge.imgmsg_to_cv2(ros_data, desired_encoding="passthrough")
    # Convert the cv_image to the HSV color space
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    # Mask for the target colour, then erode/dilate to remove small blobs
    mask = cv2.inRange(hsv, self.colourLower, self.colourUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # Find contours in the mask and initialize the current (x, y) center
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    # Only proceed if at least one contour was found
    if len(cnts) > 0:
        # Largest contour -> minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # Only draw if the radius meets a minimum size
        if radius > 10:
            cv2.circle(cv_image, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(cv_image, center, 5, (0, 0, 255), -1)
        # Convert the annotated OpenCV image back to ROS format and publish
        ros_image = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
        self.publisher.publish(ros_image)
        if center[0] < 155 or center[0] > 165:  # Check X coordinate
            self.contador = 0
            # Relative head move proportional to the pixel error
            joint = JointAnglesWithSpeed()
            joint.joint_names.append("HeadYaw")
            joint.joint_names.append("HeadPitch")
            joint.speed = 0.1
            joint.relative = True
            # Centroid coordinates; image centre is (shape[1]/2, shape[0]/2)
            target_x = center[0]
            target_y = center[1]
            sub_x = target_x - cv_image.shape[1] / 2
            sub_y = target_y - cv_image.shape[0] / 2
            # Normalised errors in [-1, 1]
            var_x = (sub_x / float(cv_image.shape[1] / 2))
            var_y = (sub_y / float(cv_image.shape[0] / 2))
            # Restrict pitch so the head cannot clash with the shoulders;
            # self.positions holds the current [HeadYaw, HeadPitch] angles
            # read from /joint_states.
            new_position_y = self.positions[1] + var_y * 0.15
            if (new_position_y <= -0.45):
                var_y = 0
            elif (new_position_y >= 0.3):
                var_y = 0
            joint.joint_angles.append(-var_x * 0.10)
            joint.joint_angles.append(var_y * 0.15)
            self.joint_pub.publish(joint)
        else:
            print "Centrado respecto a X"
            if center[1] < 115 or center[1] > 125:  # Check Y coordinate
                self.contador = 0
                joint = JointAnglesWithSpeed()
                joint.joint_names.append("HeadPitch")
                joint.speed = 0.1
                joint.relative = True
                # Fixed small pitch step toward the vertical centre
                if center[1] < 115:
                    var_y = -0.01
                elif center[1] > 125:
                    var_y = 0.01
                joint.joint_angles.append(var_y)
                self.joint_pub.publish(joint)
            else:
                # Counter ensures the ball stayed centred, not a coincidence
                self.contador = self.contador + 1
                print "Centrado respecto a Y"
                print "Contador: ", self.contador
                if self.contador > 10:
                    self.image_sub.unregister()
                    print "Centrado y salimos"
                    print "HeadPitch: ", self.positions[1]  # HeadPitch
                    print "HeadYaw: ", self.positions[0]  # HeadYaw <-->
                    print " Center is: ", center
                    # Obtain position from the ball's apparent size
                    Dreal = 0.08  # Real diameter of the ball in meters
                    pixelToMeter = Dreal / (radius * 2)
                    Yrobot = (cv_image.shape[1] / 2 - center[0]) * pixelToMeter
                    radiusToMeters = 0.25 * 55  # At 0.25m distance the radius is 55px
                    Xrobot = radiusToMeters / radius
                    # Z from the empirical line relating it to HeadPitch;
                    # divide by 100 to convert cm to m
                    Zrobot = (-25.866 * self.positions[1] + 23.843) / 100
                    # Create and publish the estimated ball position
                    point = Point()
                    point.x = Xrobot
                    point.y = Yrobot
                    point.z = Zrobot
                    self.pointPub.publish(point)
                    print "Punto publicado: ", point
                    self.image_sub.unregister()
def __init__(self):
    """Pure-pursuit path follower.

    Subscribes to /path and to /odom or /amcl_pose (callbacks defined on the
    class set self.path / self.current_position / self.vehicle_yaw), finds a
    look-ahead point on the path and publishes VESC motor speed and servo
    position commands at 30 Hz until node shutdown.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; `theta`/`rate.sleep()` placement is inferred.
    """
    rospy.init_node('pure_pursuit', anonymous=True)
    rospy.Subscriber('/path', Path, self.path_callback)
    rospy.Subscriber('/odom', Odometry, self.odom_callback)
    rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.amcl_callback)
    self.motor_pub = rospy.Publisher('/commands/motor/speed', Float64, queue_size=1)
    self.servo_pub = rospy.Publisher('/commands/servo/position', Float64, queue_size=1)
    self.motor_msg = Float64()
    self.servo_msg = Float64()
    # Flags flipped by the callbacks once the first message arrives.
    self.is_path = False
    self.is_odom = False
    self.is_amcl = False
    self.forward_point = Point()
    self.current_position = Point()
    self.is_look_forward_point = False
    self.vehicle_length = 0.5  # wheelbase [m] used by the pure-pursuit law
    self.lfd = 0.5             # look-forward (look-ahead) distance [m]
    self.steering = 0
    self.current_point_num = 0
    # Linear map from steering angle [rad] to VESC servo position.
    self.steering_angle_to_servo_gain = -1.2135
    self.steering_angle_to_servo_offset = 0.5304
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        if self.is_path == True and (self.is_odom == True or self.is_amcl == True):
            vehicle_position = self.current_position
            rotated_point = Point()
            self.is_look_forward_point = False
            for num, i in enumerate(self.path.poses):
                path_point = i.pose.position
                path_num = num
                dx = path_point.x - vehicle_position.x
                dy = path_point.y - vehicle_position.y
                # rotated_point: path point expressed in the vehicle frame.
                # NOTE(review): the y component carries a flipped sign
                # relative to the standard rotation; the negated atan2 below
                # compensates, so the pair is self-consistent.
                rotated_point.x = cos(self.vehicle_yaw) * dx + sin(
                    self.vehicle_yaw) * dy
                rotated_point.y = sin(self.vehicle_yaw) * dx - cos(
                    self.vehicle_yaw) * dy
                # Only consider points ahead of the car, at or past the
                # previously chosen path index.
                if rotated_point.x > 0 and self.current_point_num <= path_num:
                    dis = sqrt(
                        pow(rotated_point.x, 2) + pow(rotated_point.y, 2))
                    # First point at least lfd away becomes the target.
                    if dis >= self.lfd:
                        self.forward_point = path_point
                        self.forward_point_num = path_num
                        self.current_point_num = path_num
                        self.is_look_forward_point = True
                        break
            # Heading error to the last examined point (see sign note above).
            theta = -atan2(rotated_point.y, rotated_point.x)
            if self.is_look_forward_point == True:
                # Pure-pursuit steering law: delta = atan(2 L sin(theta) / lfd)
                self.steering = atan2(
                    (2 * self.vehicle_length * sin(theta)), self.lfd)
                print(self.steering * 180 / pi)
                self.motor_msg.data = 6000
            else:
                # No reachable look-ahead point: stop and straighten wheels.
                self.steering = 0
                print("no found forward point")
                self.motor_msg.data = 0
            # Steering angle [rad] -> servo position command.
            self.steering_command = (
                self.steering_angle_to_servo_gain * self.steering) + self.steering_angle_to_servo_offset
            self.servo_msg.data = self.steering_command
            self.servo_pub.publish(self.servo_msg)
            self.motor_pub.publish(self.motor_msg)
        rate.sleep()
def create_box(self, average_x, average_y, std_dev_x, std_dev_y): self.marker.header.frame_id = "laser" self.marker.type = self.marker.LINE_STRIP self.marker.action = self.marker.ADD #marker scale self.marker.scale.x = .03 self.marker.scale.y = .03 self.marker.scale.z = .03 #marker color self.marker.color.a = 1.0 self.marker.color.r = 1.0 self.marker.color.g = 1.0 self.marker.color.b = 0.0 #marker orientation self.marker.pose.orientation.x = 0.0 self.marker.pose.orientation.y = 0.0 self.marker.pose.orientation.z = 0.0 self.marker.pose.orientation.w = 1.0 #marker position self.marker.pose.position.x = 0.0 self.marker.pose.position.y = 0.0 self.marker.pose.position.z = 0.0 #marker line points self.marker.points = [] #first point first_line_point = Point() first_line_point.x = self.average_x + CONST_VALUE first_line_point.y = self.average_y + CONST_VALUE first_line_point.z = 0.0 self.marker.points.append(first_line_point) #second point second_line_point = Point() second_line_point.x = self.average_x + CONST_VALUE second_line_point.y = self.average_y - CONST_VALUE second_line_point.z = 0.0 self.marker.points.append(second_line_point) #third point third_line_point = Point() third_line_point.x = self.average_x - CONST_VALUE third_line_point.y = self.average_y - CONST_VALUE third_line_point.z = 0.0 self.marker.points.append(third_line_point) #fourth point fourth_line_point = Point() fourth_line_point.x = self.average_x - CONST_VALUE fourth_line_point.y = self.average_y + CONST_VALUE fourth_line_point.z = 0.0 self.marker.points.append(fourth_line_point) #fifth point fifth_line_point = Point() fifth_line_point.x = self.average_x + CONST_VALUE fifth_line_point.y = self.average_y + CONST_VALUE fifth_line_point.z = 0.0 self.marker.points.append(fifth_line_point)
if __name__ == "__main__": #### Please use reference: http://www.orocos.org/kdl/usermanual/geometric-primitives tongstfs = GetTongsTransform() viveFullPose = Pose() viveFullPose.position.x = 0 viveFullPose.position.y = 0 viveFullPose.position.z = 0 viveFullPose.orientation.x = 0 viveFullPose.orientation.y = 0 viveFullPose.orientation.z = 0 viveFullPose.orientation.w = 1 p = Point() p.x = 0 p.y = 0 p.z = 0 tongstfs.getTransformsVive(6.5, viveFullPose) print tongstfs.upperForceSensorTransform.p print tongstfs.lowerForceSensorTransform.p print tongstfs.centerTransform.p print tongstfs.upperForceSensorTransform.M orientCenterStraight = tongstfs.centerTransform.p + tongstfs.centerTransform.M * kdl.Vector( 0, 0.025, 0) orientCenterUp = tongstfs.centerTransform.p + tongstfs.centerTransform.M * kdl.Vector( 0, 0, 0.025) fig = plt.figure()
follow_pub = rospy.Publisher('Kinect/Follow_point', Follow, queue_size=10) rospy.init_node('netPublisher', anonymous=True) rate = rospy.Rate(30) rospy.loginfo("Initializing socket...") sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind(('', port)) rospy.loginfo("Ready!") while not rospy.is_shutdown(): try: buf, (r_ip, r_port) = sock.recvfrom(1024) print(buf, r_ip, r_port) buf = buf.split(" ") if buf[0] == "open": ret = Point() ret.x = 1 ret.y = 0 ret.z = 0.0 wiw_pub.publish(ret) elif buf[0] == "pos": ret = Point() ret.x = float(buf[1]) ret.y = float(buf[2]) ret.z = 0.0 wiw_pub.publish(ret) elif buf[0] == "name": say_ser("Are you " + buf[1] + "?") # say Is your name ... elif buf[0] == "object": say_ser("Do you need " + buf[1] + "?") elif buf[0] == "yes1": say_ser("I remember you.") # say i remember you say_ser("What do you need?")
def GPSTxT_leader(self):
    """Leader path-tracking loop over a GPS waypoint table.

    Repeatedly: locates the waypoint index `present_num` nearest to the
    vehicle by gradient-style stepping, integrates arc length along the
    piecewise-cubic path polynomials until the look-ahead distance `ld` is
    covered, refits local cubics x(s), y(s), samples them, and publishes the
    resulting local path segment (base_frame) plus the current big-node
    index.  Uses module-level globals: p (waypoints), p_length, poly,
    Nsample, leng.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the nesting of the arc-length accumulation loop is inferred.
    """
    global distance_difference, present_num, search, b, now_po
    base = base_frame()
    node_index = Int32()
    while not rospy.is_shutdown():
        my_x, my_y = self.listener()
        path_leng = 0
        search = 0
        a = 0
        a_grad = 0
        distance_difference = 100
        # Step present_num along the path until the next waypoint is no
        # closer than the current one (difference of squared distances).
        while (abs(distance_difference) > 0.01):
            distance_difference = (my_x - p[0][present_num + 1])**2 + (
                my_y - p[1][present_num + 1])**2 - (
                    my_x - p[0][present_num])**2 - (my_y - p[1][present_num])**2
            search = search + 1
            # Step size shrinks as the search runs; clamp magnitude to >= 1.
            a = -distance_difference / (1 + search / 100)
            if (a > -1 and a < 0):
                a = -1
            elif (a < 1 and a > 0):
                a = 1
            elif (math.isnan(a)):
                a = 1
            present_num = present_num + int(a)
            # Clamp to the valid waypoint range; give up after 10 steps.
            if present_num > p_length - 2:
                present_num = p_length - 2
                break
            elif present_num < 0:
                present_num = 0
                break
            elif (search == 10):
                break
        plt.plot(my_x, my_y, 'ro', label='position')
        plt.show()
        Fsum = 0
        s = [0]
        ld = self.lookahead_listener()
        # Accumulate arc length along the cubic segments until it reaches
        # the look-ahead distance ld.
        while (Fsum < ld):
            big_node = int((present_num + path_leng) / Nsample)
            if (big_node > leng - 1):
                break
            # Speed of the parametric cubic: sqrt(x'(t)^2 + y'(t)^2).
            ft = lambda t: (
                (3 * poly[0][big_node][1] * t * t + 2 * poly[1][big_node][
                    1] * t + poly[2][big_node][1])**2 +
                (3 * poly[0][big_node][0] * t * t + 2 * poly[1][big_node][
                    0] * t + poly[2][big_node][0])**2)**0.5
            for small_node in range(0, Nsample):
                # Arc length of one sub-segment by numerical quadrature.
                F, err = quad(ft, float(small_node) / Nsample,
                              float(small_node + 1) / Nsample)
                Fsum = Fsum + F
                path_leng = path_leng + 1
                if Fsum > ld:
                    break
                elif present_num + path_leng > p_length - 1:
                    break
                s.append(Fsum)
        print(len(s), path_leng)
        #s=[Fsum]
        if (len(s) > 2):
            # Resample the segment uniformly in arc length (11 samples).
            point_array = np.linspace(0, ld, 11)
            distance = ((my_x - p[0][present_num])**2 +
                        (my_y - p[1][present_num])**2)**0.5
            fpx = np.poly1d(
                np.polyfit(s, p[0][present_num:present_num + path_leng], 3))
            fpy = np.poly1d(
                np.polyfit(s, p[1][present_num:present_num + path_leng], 3))
            ix = fpx(point_array)
            iy = fpy(point_array)
            # Heading and spacing of consecutive resampled points.
            iangle = []
            idistance = []
            for scurve in range(0, Nsample):
                iangle.append(
                    math.atan2(iy[scurve + 1] - iy[scurve],
                               ix[scurve + 1] - ix[scurve]))
                idistance.append(
                    math.sqrt((iy[scurve + 1] - iy[scurve])**2 +
                              (ix[scurve + 1] - ix[scurve])**2))
            # Cross product sign decides which side of the segment the
            # vehicle is on (lateral error sign).
            a = ix[10] - ix[0]
            b = iy[10] - iy[0]
            c = my_x - ix[0]
            d = my_y - iy[0]
            if (a * d - b * c) > 0:
                direction = -1
            else:
                direction = 1
            # Publish the current waypoint as a green RViz marker.
            now_po = Point()
            now_msg = Marker(
                type=Marker.POINTS,
                #points=Point(bm[i][0],bm[i][1],0),
                lifetime=rospy.Duration(0),
                scale=Vector3(0.5, 0.5, 0.1),
                header=Header(frame_id='map'),
                color=ColorRGBA(0.0, 1.0, 0.0, 0.8))
            now_po.x = p[0][present_num] - self.offset_X
            now_po.y = p[1][present_num] - self.offset_Y
            now_po.z = 0
            now_msg.points = [now_po]
            now_msg.id = 6
            # rviz_msg.points.x=p.x
            self.now_plot.publish(now_msg)
            print("------------------------------")
            print(direction, distance)
            print(present_num, search, s[len(s) - 1])
            print(my_x, my_y)
            print("------------------------------")
            # print(ix)
            print(iy)
            print(iangle)
            print(idistance)
            # now_po.x=my_x-self.offset_X
            # now_po.y=my_y-self.offset_Y
            # now_po.z=0
            # now_po.points=[po]
            # now_po.id=0
            # rviz_msg.points.x=p.x
            # self.now_plot.publish(now_po)
            # Publish the local path frame for the follower controller.
            node_index.data = int(present_num / Nsample)
            base.distance = direction * distance
            base.ld = ld
            base.s_x = ix
            base.s_y = iy
            base.s_a = iangle
            base.s_d = idistance
            base.tm_x = my_x
            base.tm_y = my_y
            self.big_node_num.publish(node_index)
            self.base_frame.publish(base)
def moveRobot(x, y): goal = Point() goal.x = x goal.y = y return goal
def perform_drill(self): # AIR PRESSURE : 90 PSI # OIL DAMPER : 9 # Constants [v0, v1] = [9.46, 0.11] [d0, d1] = [0.00, 12.70] filter_size = 11 filter_ord = 2 # Variables drill_depth_reached = False drill_success = True bin_t = [] bin_d = [] T0 = rospy.get_time() while (True): # break ti = rospy.get_time() - T0 print(ti) if ti > 20.0: drill_success = False break vi = self.robot.get_analog_in(2) di = (d1 - d0) / (v1 - v0) * (vi - v0) + d0 bin_t.append(ti) bin_d.append(di) displacement = Point() displacement.x = 0 # Lower Limit displacement.y = di displacement.z = 0 # Upper Limit # Run-out Reached n = 10 if len(bin_d) >= n and drill_depth_reached == False: x = bin_t[len(bin_t) - n:len(bin_t)] y = bin_d[len(bin_t) - n:len(bin_t)] a, b = linear_fit(x, y) if a < 0.05 and bin_d[-1] > 2.0: drill_depth_reached = True T1 = rospy.get_time() if drill_depth_reached and rospy.get_time() - T1 > 0.5: break # if len(bin_t) > filter_size: # # win_t = np.asarray(bin_t[len(bin_t)-filter_size:len(bin_t)]) # win_d = np.asarray(bin_d[len(bin_t)-filter_size:len(bin_t)]) # # dt = (win_t[-1] - win_t[0])/(filter_size-1) # # di = savitzky_golay(win_d, window_size=filter_size, order=filter_ord, deriv=0)[filter_size/2] # fi = np.dot(savitzky_golay(win_d, window_size=filter_size, order=filter_ord, deriv=1), 1.0/dt)[filter_size / 2] # # # Publish # # feedrate = Point() # feedrate.x = 0 # Lower Limit # feedrate.y = fi # feedrate.z = 0 # Upper Limit # self.pub_feedrate.publish(feedrate) # # # Run-out Reached # # if not drill_depth_reached and np.abs(fi) < 0.05: # T1 = rospy.get_time() # drill_depth_reached = True # # if drill_depth_reached and rospy.get_time() - T1 > 1.00: # break rospy.sleep(0.05) now = datetime.datetime.now() return drill_success
def publish_tracked_people(self, now):
    """ Publish markers of tracked people to Rviz and to <people_tracked> topic

    For every tracked object classified as a person (optionally only those
    seen in the current scan), transforms its position into
    self.publish_people_frame, appends it to a PersonArray message, and
    publishes a set of RViz markers per person: body cylinder, head sphere,
    ID text, velocity arrow, and a confidence ellipse.  Markers left over
    from the previous cycle are deleted.
    """
    people_tracked_msg = PersonArray()
    people_tracked_msg.header.stamp = now
    people_tracked_msg.header.frame_id = self.publish_people_frame
    marker_id = 0

    # Make sure we can get the required transform first:
    if self.use_scan_header_stamp_for_tfs:
        tf_time = now
        try:
            self.listener.waitForTransform(self.publish_people_frame, self.fixed_frame, tf_time, rospy.Duration(1.0))
            transform_available = True
        except:
            transform_available = False
    else:
        # Use the latest available transform instead of the scan stamp.
        tf_time = rospy.Time(0)
        transform_available = self.listener.canTransform(self.publish_people_frame, self.fixed_frame, tf_time)

    marker_id = 0
    if not transform_available:
        rospy.loginfo("Person tracker: tf not avaiable. Not publishing people")
    else:
        for person in self.objects_tracked:
            if person.is_person == True:
                # Only publish people seen in the current scan, unless we
                # want to publish occluded people too.
                if self.publish_occluded or person.seen_in_current_scan:
                    # Get position in the <self.publish_people_frame> frame
                    ps = PointStamped()
                    ps.header.frame_id = self.fixed_frame
                    ps.header.stamp = tf_time
                    ps.point.x = person.pos_x
                    ps.point.y = person.pos_y
                    try:
                        ps = self.listener.transformPoint(self.publish_people_frame, ps)
                    except:
                        rospy.logerr("Not publishing people due to no transform from fixed_frame-->publish_people_frame")
                        continue

                    # publish to people_tracked topic
                    new_person = Person()
                    new_person.pose.position.x = ps.point.x
                    new_person.pose.position.y = ps.point.y
                    # Orientation faces the direction of travel.
                    yaw = math.atan2(person.vel_y, person.vel_x)
                    quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
                    new_person.pose.orientation.x = quaternion[0]
                    new_person.pose.orientation.y = quaternion[1]
                    new_person.pose.orientation.z = quaternion[2]
                    new_person.pose.orientation.w = quaternion[3]
                    new_person.id = person.id_num
                    people_tracked_msg.people.append(new_person)

                    # publish rviz markers
                    # Cylinder for body.  Note: the same Marker instance is
                    # mutated and re-published for each subsequent shape.
                    marker = Marker()
                    marker.header.frame_id = self.publish_people_frame
                    marker.header.stamp = now
                    marker.ns = "People_tracked"
                    marker.color.r = person.colour[0]
                    marker.color.g = person.colour[1]
                    marker.color.b = person.colour[2]
                    # Alpha fades out as the person goes unseen for up to 3 s.
                    marker.color.a = (rospy.Duration(3) - (rospy.get_rostime() - person.last_seen)).to_sec()/rospy.Duration(3).to_sec() + 0.1
                    marker.pose.position.x = ps.point.x
                    marker.pose.position.y = ps.point.y
                    marker.id = marker_id
                    marker_id += 1
                    marker.type = Marker.CYLINDER
                    marker.scale.x = 0.2
                    marker.scale.y = 0.2
                    marker.scale.z = 1.2
                    marker.pose.position.z = 0.8
                    self.marker_pub.publish(marker)

                    # Sphere for head shape
                    marker.type = Marker.SPHERE
                    marker.scale.x = 0.2
                    marker.scale.y = 0.2
                    marker.scale.z = 0.2
                    marker.pose.position.z = 1.5
                    marker.id = marker_id
                    marker_id += 1
                    self.marker_pub.publish(marker)

                    # Text showing person's ID number
                    marker.color.r = 1.0
                    marker.color.g = 1.0
                    marker.color.b = 1.0
                    marker.color.a = 1.0
                    marker.id = marker_id
                    marker_id += 1
                    marker.type = Marker.TEXT_VIEW_FACING
                    marker.text = str(person.id_num)
                    marker.scale.z = 0.2
                    marker.pose.position.z = 1.7
                    self.marker_pub.publish(marker)

                    # Arrow pointing in direction they're facing with
                    # magnitude proportional to speed
                    marker.color.r = person.colour[0]
                    marker.color.g = person.colour[1]
                    marker.color.b = person.colour[2]
                    marker.color.a = (rospy.Duration(3) - (rospy.get_rostime() - person.last_seen)).to_sec()/rospy.Duration(3).to_sec() + 0.1
                    start_point = Point()
                    end_point = Point()
                    start_point.x = marker.pose.position.x
                    start_point.y = marker.pose.position.y
                    end_point.x = start_point.x + 0.5*person.vel_x
                    end_point.y = start_point.y + 0.5*person.vel_y
                    # Arrow points are absolute, so zero the marker pose.
                    marker.pose.position.x = 0.
                    marker.pose.position.y = 0.
                    marker.pose.position.z = 0.1
                    marker.id = marker_id
                    marker_id += 1
                    marker.type = Marker.ARROW
                    marker.points.append(start_point)
                    marker.points.append(end_point)
                    marker.scale.x = 0.05
                    marker.scale.y = 0.1
                    marker.scale.z = 0.2
                    self.marker_pub.publish(marker)

                    # <self.confidence_percentile>% confidence bounds of
                    # person's position as an ellipse:
                    cov = person.filtered_state_covariances[0][0] + person.var_obs  # cov_xx == cov_yy == cov
                    std = cov**(1./2.)
                    gate_dist_euclid = scipy.stats.norm.ppf(1.0 - (1.0-self.confidence_percentile)/2., 0, std)
                    marker.pose.position.x = ps.point.x
                    marker.pose.position.y = ps.point.y
                    marker.type = Marker.SPHERE
                    marker.scale.x = 2*gate_dist_euclid
                    marker.scale.y = 2*gate_dist_euclid
                    marker.scale.z = 0.01
                    marker.color.r = person.colour[0]
                    marker.color.g = person.colour[1]
                    marker.color.b = person.colour[2]
                    marker.color.a = 0.1
                    marker.pose.position.z = 0.0
                    marker.id = marker_id
                    marker_id += 1
                    self.marker_pub.publish(marker)

        # Clear previously published people markers
        for m_id in xrange(marker_id, self.prev_person_marker_id):
            marker = Marker()
            marker.header.stamp = now
            marker.header.frame_id = self.publish_people_frame
            marker.ns = "People_tracked"
            marker.id = m_id
            marker.action = marker.DELETE
            self.marker_pub.publish(marker)
        self.prev_person_marker_id = marker_id

    # Publish people tracked message
    self.people_tracked_pub.publish(people_tracked_msg)
rospy.loginfo("waiting for Robot 2 to send") while f != i1: r.sleep() rospy.loginfo("Starting Calculations:") global x3 global y3, ax, ay x3 = float(a1) y3 = float(b1) ax = float(a) ay = float(b) rospy.loginfo("Points of Robot 2") if abs(ax - x3) < 0.001 and abs(ay - y3) < 0.001: break else: goal1.x = 0.5 * ax + 0.5 * x3 goal1.y = 0.5 * ay + 0.5 * y3 rospy.loginfo("Finished Iteration:") rospy.loginfo(i1) i1 = i1 + 1 px = str(goal1.x) py = str(goal1.y) strMsg = px + "@" + py + "@" + str(i1) list1.append(strMsg) rospy.loginfo("strMsg") pubPos.publish(strMsg) r.sleep() rospy.loginfo("Published current iteration value:") status = 0 ax = round(ax, 2) ay = round(ay, 2) rospy.loginfo(ax)
import rospy from geometry_msgs.msg import Twist,Point from nav_msgs.msg import Odometry from std_msgs.msg import Empty from math import pow,atan2,sqrt,radians import tf import time import math from tf.transformations import euler_from_quaternion current_position = Point(0.0,0.0,0.0) goal_position = Point() goal_position.x =-3 goal_position.y =3 goal_position.z= 0 def odometry1_callback(msg): global current_position current_position.x=msg.pose.pose.position.x current_position.y=msg.pose.pose.position.y orientation_data=msg.pose.pose.orientation quaternion=[orientation_data.x,orientation_data.y,orientation_data.z,orientation_data.w] roll,pitch,yaw=tf.transformations.euler_from_quaternion(quaternion) current_position.z=yaw def euclidean_distance(X,Y):