def main(args=None):
    test_msg = std.Header(frame_id="A")
    rclpy.init(args=args)
    node = rclpy.create_node("std_msg")
    pub = node.create_publisher(std.Header, "std_msg", 10)
    pub.publish(test_msg)
    node.get_logger().info(yaml.dump(YamlHeader(test_msg), canonical=False))
def video_to_msg():
    rospy.init_node("video_to_msg_node", anonymous=True)
    pub_image = rospy.Publisher("/axis/image_raw", Image, queue_size=10)
    pub_compr = rospy.Publisher("/axis/image_raw/compressed", CompressedImage, queue_size=10)
    pub_info = rospy.Publisher("/axis/camera_info", CameraInfo, queue_size=10)

    filename = sys.argv[1]
    video = cv2.VideoCapture(filename)
    if not video.isOpened():
        print "Error: Can't open: " + str(filename)
        exit(0)
    print "Correctly opened, starting to publish."

    fps = video.get(cv2.CAP_PROP_FPS)
    rate = rospy.Rate(fps)
    count_max = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    resolution = video.get(cv2.CAP_PROP_FRAME_HEIGHT)

    bridge = CvBridge()
    msg_header = std.Header()
    msg_header.frame_id = "/axis"
    if resolution == 720.0:
        msg_info = info_720p()
    elif resolution == 480.0:
        msg_info = info_480p()

    success, frame = video.read()
    count = 1
    while success and not rospy.is_shutdown():
        msg_image = bridge.cv2_to_imgmsg(np.uint8(frame), encoding="rgb8")
        msg_compr = bridge.cv2_to_compressed_imgmsg(frame, dst_format="jpeg")
        msg_header.stamp = rospy.Time.now()
        msg_header.seq = count
        msg_image.header = msg_header
        msg_compr.header = msg_header
        msg_info.header = msg_header
        # print msg_compr
        # time.sleep(10)
        pub_image.publish(msg_image)
        pub_compr.publish(msg_compr)
        pub_info.publish(msg_info)
        print str(count) + "/" + str(count_max)
        rate.sleep()
        success, frame = video.read()
        count += 1
def getObjects(mapInfo):
    with open(mapInfo, 'r') as stream:
        try:
            yamled = yaml.load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    # Delete the map metadata entry
    del yamled['info']

    # Populate dictionaries with location names and their poses
    objDict = yamled.values()
    objLocations = {}
    objNames = {}
    for item in objDict:
        itemName = item['name']
        if itemName[0:4] != "wall":
            x_loc = item['centroid_x'] + (item['width'] / 2 + .6) * math.cos(
                math.radians(item['orientation']))
            y_loc = item['centroid_y'] + (item['length'] / 2 + .6) * math.sin(
                math.radians(item['orientation']))
            # orientation is stored in degrees (see the cos/sin above), so
            # convert before building the quaternion; the original passed
            # degrees straight to quaternion_from_euler, which expects radians
            quat = tf.transformations.quaternion_from_euler(
                0, 0, math.radians(item['orientation'] - 180))
            itemLoc = geo_msgs.PoseStamped(
                std_msgs.Header(),
                geo_msgs.Pose(
                    geo_msgs.Point(x_loc, y_loc, 0),
                    geo_msgs.Quaternion(quat[0], quat[1], quat[2], quat[3])))
            objLocations[itemName] = itemLoc
            objNames[itemName] = ([item['value']])
    return objLocations, objNames
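# A minimal sanity check for the degrees-to-radians conversion above,
# assuming a ROS1 environment where tf.transformations is importable:
# quaternion_from_euler expects radians, and a 180-degree yaw should give
# the quaternion (x, y, z, w) = (0, 0, 1, 0).
import math
import tf.transformations

q = tf.transformations.quaternion_from_euler(0, 0, math.radians(180))
assert abs(q[2] - 1.0) < 1e-6 and abs(q[3]) < 1e-6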
def _update_coem(self):
    """Extracts and stores a new explored centre of mass in the map"""
    # Get the latest occupancy grid map
    latest_map = rospy.wait_for_message("/map", nav_msgs.OccupancyGrid)

    # Build an OpenCV binary image, with 1 corresponding to space that has
    # been marked as free. OccupancyGrid data is row-major (height rows of
    # width columns); the original reshaped as (width, height)
    map_data = np.array(latest_map.data, dtype=np.int8).reshape(
        latest_map.info.height, latest_map.info.width)
    free_space_map = ((map_data >= 0) & (map_data <= 50)).astype(np.uint8)

    # Use "image moments" to compute the centre of mass
    # TODO should probably incorporate orientation in coordinate calc...
    ms = cv2.moments(free_space_map, True)
    centre = np.array([ms['m10'], ms['m01']]) / ms['m00']
    centre_coordinates = np.array([
        latest_map.info.origin.position.x,
        latest_map.info.origin.position.y
    ]) + latest_map.info.resolution * centre

    # Update "centre of explored mass" in the abstract map
    # TODO remove debug
    self._abstract_map._spatial_layout._coem = centre_coordinates
    self._debug_coem.publish(
        geometry_msgs.PoseStamped(
            header=std_msgs.Header(stamp=rospy.Time.now(), frame_id='map'),
            pose=tools.xythToPoseMsg(centre_coordinates[0],
                                     centre_coordinates[1], 0)))
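# A minimal sketch of the image-moments step above (numpy and cv2 only):
# m10/m00 is the mean column index and m01/m00 the mean row index of the
# non-zero cells, i.e. the centre of mass of the free space.
import numpy as np
import cv2

grid = np.zeros((5, 5), dtype=np.uint8)
grid[1:4, 2:5] = 1  # a 3x3 block of "free" cells
ms = cv2.moments(grid, True)
centre = np.array([ms['m10'], ms['m01']]) / ms['m00']  # (x=col, y=row)
rows, cols = np.nonzero(grid)
assert np.allclose(centre, [cols.mean(), rows.mean()])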
def makePointCloud2Msg(points, frame_time, parent_frame, pcd_format):
    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize

    data = points.astype(dtype).tobytes()

    fields = [
        sensor_msgs.PointField(name=n, offset=i * itemsize,
                               datatype=ros_dtype, count=1)
        for i, n in enumerate(pcd_format)
    ]

    # header = std_msgs.Header(frame_id=parent_frame, stamp=rospy.Time.now())
    header = std_msgs.Header(frame_id=parent_frame,
                             stamp=rospy.Time.from_sec(frame_time))

    num_field = len(pcd_format)
    return sensor_msgs.PointCloud2(header=header,
                                   height=1,
                                   width=points.shape[0],
                                   is_dense=False,
                                   is_bigendian=False,
                                   fields=fields,
                                   point_step=(itemsize * num_field),
                                   row_step=(itemsize * num_field * points.shape[0]),
                                   data=data)
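# A hedged usage sketch for makePointCloud2Msg (the frame name and
# timestamp are illustrative; assumes the same module-level imports):
pts = np.random.rand(100, 3)
cloud_msg = makePointCloud2Msg(pts, frame_time=1581534822.0,
                               parent_frame="map", pcd_format="xyz")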
def make_header(frame='/body'):
    try:
        cur_time = rospy.Time.now()
    except rospy.ROSInitException:
        cur_time = rospy.Time(0)

    header = std_msgs.Header(stamp=cur_time, frame_id=frame)
    return header
def make_header(frame='/body', stamp=None):
    if stamp is None:
        try:
            stamp = rospy.Time.now()
        except rospy.ROSInitException:
            stamp = rospy.Time(0)

    header = std_msgs.Header(stamp=stamp, frame_id=frame)
    return header
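# A hedged usage sketch: both make_header variants above yield a stamped
# std_msgs/Header, and the second also accepts an explicit stamp (e.g. to
# reuse a sensor timestamp). Frame names here are illustrative.
h_now = make_header('/base_link')
h_old = make_header('/base_link', stamp=rospy.Time(0))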
def to_pose3d(pose, timestamp=rospy.Time(), frame=None):
    if isinstance(pose, geometry_msgs.Pose2D):
        p = geometry_msgs.Pose(geometry_msgs.Point(pose.x, pose.y, 0.0),
                               quaternion_msg_from_yaw(pose.theta))
        if not frame:
            return p
        return geometry_msgs.PoseStamped(std_msgs.Header(0, timestamp, frame), p)
    raise rospy.ROSException(
        "Input parameter pose is not a geometry_msgs.Pose2D object")
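# A hedged usage sketch for to_pose3d (assumes the same module's imports
# and its quaternion_msg_from_yaw helper): with a frame given, a
# PoseStamped comes back instead of a bare Pose.
pose2d = geometry_msgs.Pose2D(x=1.0, y=2.0, theta=0.5)
pose_stamped = to_pose3d(pose2d, timestamp=rospy.Time(0), frame='map')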
def publishTagMarkers(tags, publisher):
    """Republishes the tag_markers topic"""
    SHIFT = 0.05
    h = std_msgs.Header(stamp=rospy.Time.now(), frame_id='map')
    ca = std_msgs.ColorRGBA(r=1, a=1)
    cb = std_msgs.ColorRGBA(g=1, a=1)
    ct = std_msgs.ColorRGBA(b=1, a=1)
    sa = geometry_msgs.Vector3(x=0.1, y=0.5, z=0.5)
    sb = geometry_msgs.Vector3(x=0.1, y=0.25, z=0.25)
    st = geometry_msgs.Vector3(x=1, y=1, z=1)

    ma = visualization_msgs.MarkerArray()
    ma.markers.append(
        visualization_msgs.Marker(
            header=h, id=-1, action=visualization_msgs.Marker.DELETEALL))
    for i, t in enumerate(tags):
        th = t['th_deg'] * math.pi / 180.
        q = tf.transformations.quaternion_from_euler(0, 0, th)
        q = geometry_msgs.Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])
        ma.markers.append(
            visualization_msgs.Marker(
                header=h,
                id=i * 3,
                type=visualization_msgs.Marker.CUBE,
                action=visualization_msgs.Marker.ADD,
                pose=geometry_msgs.Pose(
                    position=geometry_msgs.Point(x=t['x'], y=t['y']),
                    orientation=q),
                scale=sa,
                color=ca))
        ma.markers.append(
            visualization_msgs.Marker(
                header=h,
                id=i * 3 + 1,
                type=visualization_msgs.Marker.CUBE,
                action=visualization_msgs.Marker.ADD,
                pose=geometry_msgs.Pose(
                    position=geometry_msgs.Point(
                        x=t['x'] + SHIFT * math.cos(th),
                        y=t['y'] + SHIFT * math.sin(th)),
                    orientation=q),
                scale=sb,
                color=cb))
        ma.markers.append(
            visualization_msgs.Marker(
                header=h,
                id=i * 3 + 2,
                type=visualization_msgs.Marker.TEXT_VIEW_FACING,
                action=visualization_msgs.Marker.ADD,
                pose=geometry_msgs.Pose(
                    # t is a dict keyed by name throughout; the original
                    # indexed t[1]/t[2] here, which would raise a KeyError
                    position=geometry_msgs.Point(x=t['x'], y=t['y'], z=0.5),
                    orientation=q),
                scale=st,
                color=ct,
                text=str(t['id'])))
    publisher.publish(ma)
def point_cloud(self, points, parent_frame):
    """
    Creates a point cloud message.
    Args:
        points: Nx3 array of xyz positions.
        parent_frame: frame in which the point cloud is defined
    Returns:
        sensor_msgs/PointCloud2 message

    Code source:
        https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0

    References:
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html
        http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html
    """
    import sensor_msgs.msg as sensor_msgs
    import std_msgs.msg as std_msgs

    # In a PointCloud2 message, the point cloud is stored as a byte
    # array. In order to unpack it, we also include some parameters
    # which describe the size of each individual point.
    if not len(points):
        return sensor_msgs.PointCloud2()

    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize  # A 32-bit float takes 4 bytes.

    data = points.astype(dtype).tobytes()

    # The fields specify what the bytes represent. The first 4 bytes
    # represent the x-coordinate, the next 4 the y-coordinate, etc.
    fields = [
        sensor_msgs.PointField(name=n, offset=i * itemsize,
                               datatype=ros_dtype, count=1)
        for i, n in enumerate('xyz')
    ]

    # The PointCloud2 message also has a header which specifies which
    # coordinate frame it is represented in.
    header = std_msgs.Header(frame_id=parent_frame)

    return sensor_msgs.PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=(itemsize * 3),  # Every point consists of three float32s.
        row_step=(itemsize * 3 * points.shape[0]),
        data=data,
    )
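# A hedged round-trip sketch for the point_cloud method above ("node" is a
# hypothetical instance of the enclosing class): ROS1's
# sensor_msgs.point_cloud2 helpers can unpack the message again.
import numpy as np
from sensor_msgs import point_cloud2

cloud = node.point_cloud(np.random.rand(10, 3), parent_frame='map')
xyz = list(point_cloud2.read_points(cloud, field_names=('x', 'y', 'z')))
assert len(xyz) == 10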
def vectors2BodyInfo(translation, rotation, velocity, twist, linear_accel,
                     angular_accel):
    header = std_msgs.Header()
    header.stamp = rospy.Time.now()
    pose = geo_msgs.Pose(Array2Point(translation), Array2Quaternion(rotation))
    twist = geo_msgs.Twist(Array2Vector3(velocity), Array2Vector3(twist))
    accel = geo_msgs.Accel(Array2Vector3(linear_accel),
                           Array2Vector3(angular_accel))
    return uav_msgs.BodyInfo(header, pose, twist, accel)
def getRobberLocation(self, tfMsg):
    poseMsg = geo_msgs.PoseStamped(
        std_msgs.Header(),
        geo_msgs.Pose(
            geo_msgs.Point(tfMsg.transform.translation.x,
                           tfMsg.transform.translation.y,
                           tfMsg.transform.translation.z),
            geo_msgs.Quaternion(tfMsg.transform.rotation.x,
                                tfMsg.transform.rotation.y,
                                tfMsg.transform.rotation.z,
                                tfMsg.transform.rotation.w)))
    self.robLoc = poseMsg
def Header(frame_id="/map", stamp=None):
    """Make a Header

    >>> h = Header("/base_link")
    >>> assert h.stamp.secs > 0
    >>> assert h.stamp.nsecs > 0
    """
    if not stamp:
        _time = rospy.Time.now()
    else:
        _time = stamp
    header = std.Header(stamp=_time, frame_id=frame_id)
    return header
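# A minimal sketch of the field order this helper hides: std_msgs/Header
# is (seq, stamp, frame_id), so keyword construction (as above) avoids
# mixing up the positional Header(seq, stamp, frame_id) order that some
# snippets below rely on. Assumes rospy and std_msgs.msg imported as std.
h = std.Header(seq=0, stamp=rospy.Time(0), frame_id='/map')
assert h.frame_id == '/map'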
def getCopLocation(self, tfMsg):
    self.pastCopLoc = self.copLoc
    poseMsg = geo_msgs.PoseStamped(
        std_msgs.Header(),
        geo_msgs.Pose(
            geo_msgs.Point(tfMsg.transform.translation.x,
                           tfMsg.transform.translation.y,
                           tfMsg.transform.translation.z),
            geo_msgs.Quaternion(tfMsg.transform.rotation.x,
                                tfMsg.transform.rotation.y,
                                tfMsg.transform.rotation.z,
                                tfMsg.transform.rotation.w)))
    self.copLoc = poseMsg
def copDetection():
    copName = "deckard"
    robberName = "roy"

    # Get the objects and their locations (getObjects returns both the
    # location and name dictionaries; the original dropped the second)
    mapInfo = 'map2.yaml'
    objLocations, objNames = getObjects(mapInfo)
    vertexes = objLocations.values()
    vertexKeys = objLocations.keys()

    # TODO: GetPlan not working because move_base does not provide the
    # service (either fix it or find a new way to get distance)
    # tol = .1
    # robberName = "roy"
    # robLoc = geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(1,1,0), geo_msgs.Quaternion(0,0,0,1)))
    # destination = geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(0,1,0), geo_msgs.Quaternion(0,0,0,1)))
    # print(robLoc)
    # print(destination)
    #
    # # Get plan from make_plan service
    # # rospy.wait_for_service("/" + robberName + "move_base/make_plan")
    # try:
    #     planner = rospy.ServiceProxy("/" + robberName + "/move_base/make_plan", nav_srv.GetPlan)
    #     plan = planner(robLoc, destination, tol)
    #     poses = plan.plan.poses
    #     print(poses)
    # except rospy.ServiceException, e:
    #     print "GetPlan service call failed: %s" % e
    #     return 0

    # rospy.Subscriber("/" + copName + "/", geo_msgs.Pose, getCopLocation)
    # rospy.Subscriber("/" + robberName + "/", geo_msgs.Pose, getRobberLocation)

    copLoc = geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(1, 1, 0), geo_msgs.Quaternion(0, 0, 1, 0)))
    pastCopLoc = geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(0, 1, 0), geo_msgs.Quaternion(0, 0, 1, 0)))
    robLoc = geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(1, 0, 0), geo_msgs.Quaternion(0, 0, 1, 0)))
    test_path = [geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(0, 1, 0), geo_msgs.Quaternion(0, 0, 0, 1))),
                 geo_msgs.PoseStamped(std_msgs.Header(), geo_msgs.Pose(geo_msgs.Point(1, 0, 0), geo_msgs.Quaternion(0, 0, 0, 1)))]
    test_plans = {"kitchen": nav_msgs.Path(std_msgs.Header(), test_path)}

    curCost, curDestination = chooseDestination(test_plans, copLoc, robLoc, pastCopLoc)
    print("Minimum Cost: " + str(curCost) + " at " + curDestination)

    # Move base to curDestination
    # state = mover_base.get_state()
    # pathFailure = False
    # while (state != 3 and pathFailure == False):  # SUCCESSFUL
    #     # wait a second
    #     rospy.sleep(1)
    #     newCost = evaluatePath(test_plans[curDestination])
    #     if newCost > curCost * 2:
    #         pathFailure = True
    # rospy.loginfo("Just Reached " + vertexKeys[i])

    # Floyd-Warshall stuff
    mapGrid = np.load('mapGrid.npy')
    floydWarshallCosts = np.load('floydWarshallCosts.npy')
    evaluateFloydCost(copLoc, robLoc, floydWarshallCosts, mapGrid)
def publish(self, *_):
    """Publishes to any required topics, only if conditions are met"""
    del _
    # DEBUG TODO DELETE
    if self._debug_publish:
        self._pub_am.publish(
            std_msgs.String(data=pickle.dumps(self._abstract_map)))

    # Only proceed with publishing if the network has recently changed from
    # being settled to unsettled (or vice versa)
    settled = self._abstract_map._spatial_layout.isSettled()
    if settled == self._last_settled:
        return

    # Update the paused status
    if settled:
        self._abstract_map._spatial_layout._paused = True

    # Publish the abstract map as a pickled byte stream
    self._pub_am.publish(
        std_msgs.String(data=pickle.dumps(self._abstract_map)))

    # Publish an updated goal if running in goal mode
    if (self._abstract_map._spatial_layout.isObserved(self._goal) and
            settled):
        self._goal_complete = True
        rospy.loginfo("MISSION ACCOMPLISHED! %s was found." % (self._goal))
    elif settled and self._pub_goal is not None:
        # Get the suggested pose for the goal from the abstract map
        goal_pos = self._abstract_map.getToponymLocation(self._goal)

        # Send the message (only if we found a suggested pose for the goal)
        if goal_pos is not None:
            self._pub_goal.publish(
                geometry_msgs.PoseStamped(
                    header=std_msgs.Header(stamp=rospy.Time.now(),
                                           frame_id='map'),
                    pose=geometry_msgs.Pose(
                        # Pose.position is a Point; the original passed a
                        # Vector3, which only serialises by duck-typing
                        position=geometry_msgs.Point(
                            goal_pos[0], goal_pos[1], 0),
                        orientation=tools.yawToQuaternionMsg(0))))

    # Update the last_settled state
    self._last_settled = settled
    if settled:
        self._debug_lock = True
        # pudb.set_trace()
        self._debug_lock = False
def main():
    # Read ROS parameter arguments, if present
    #   phasespace_ip_addr="..."
    #   framerate=50
    # ros_params = rospy.get_param('~')

    # # Read command line options
    # parser = argparse.ArgumentParser()
    # parser.add_argument('server_ip')
    # parser.add_argument('-f', '--framerate', default=1, type=float, help='Desired framerate')
    # args = parser.parse_args()

    # Initialize the ROS node
    pub = rospy.Publisher('mocap_point_cloud', sensor_msgs.PointCloud)
    rospy.init_node('mocap_streamer')

    # Load the mocap stream
    ip = rospy.get_param('phasespace/ip', '192.168.0.6')
    with load_mocap.PhasespaceMocapSource(
            ip, num_points=10, framerate=480).get_stream() as mocap:
        # Play the points from the mocap stream.
        # Loop until the node is killed with Ctrl+C
        frame_num = 0
        while 1:
            frame = mocap.read(block=True)[0].squeeze()
            if rospy.is_shutdown():
                break
            print('STREAMING: Frame ' + str(frame_num), end='\r')
            sys.stdout.flush()

            # Construct and publish the message
            message = sensor_msgs.PointCloud()
            message.header = std_msgs.Header()
            message.header.frame_id = 'world'  # 'mocap'
            # message.header.time = rospy.get_rostime()
            message.points = []
            for i in range(frame.shape[0]):
                point = geometry_msgs.Point32()
                point.x = frame[i, 0] / 1000
                point.y = -frame[i, 2] / 1000
                point.z = frame[i, 1] / 1000
                message.points.append(point)
            pub.publish(message)
            frame_num += 1
    print('\rSTOPPED: Frame ' + str(frame_num))
def _resolve(self):
    # "type" is a reserved keyword. Maybe unpacking a dict as kwargs is
    # cleaner
    query = SimpleQueryRequest(id=self.entity_designator.resolve())
    entities = self.ed(query).entities
    if entities:
        entity = entities[0]
        # The entity ID is also the frame ID; Ed just works that way.
        # (Keyword arguments used here: the original positional call put
        # the ID into Header.seq rather than Header.frame_id.)
        pointstamped = gm.PointStamped(point=entity.pose.position,
                                       header=std.Header(
                                           frame_id=entity.id,
                                           stamp=rospy.get_rostime()))
        self._current = pointstamped
        return self.current
    else:
        return None
def publish_action_twist(self):
    v_w = self.domain.dynamics.setpoint_body.linearVelocity
    w_w = self.domain.dynamics.setpoint_body.angularVelocity
    a = self.domain.dynamics.grasp_body.angle
    m = tf.transformations.rotation_matrix(-a, [0, 0, 1])[0:2, 0:2]
    m *= (1.0 / self.domain.dynamics.simulation_scale)
    v_g = np.dot(m, v_w)
    v = geom_msg.Vector3(v_g[0], v_g[1], 0)
    w = geom_msg.Vector3(0, 0, w_w)
    ts = geom_msg.TwistStamped(
        std_msg.Header(None, self.get_domain_sim_time(), "grasp"),
        geom_msg.Twist(v, w))
    self.action_twist_publisher.publish(ts)
def publish_estimated_position(self, target, x, y):
    '''Publish the estimated target position using a Particle Filter'''
    # Make the 2D (xyzrgba) array, downsampling the particles by 10x
    x = x.reshape([x.size, 1])[::10]
    y = y.reshape([y.size, 1])[::10]
    z = np.zeros(x.shape)
    r = np.ones(x.shape)
    g = np.zeros(x.shape)
    b = np.zeros(x.shape)
    a = np.zeros(x.shape) + 0.3
    points = np.concatenate((x, y, z, r, g, b, a), axis=1)

    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize
    data = points.astype(dtype).tobytes()
    fields = [sensor_msgs.PointField(name=n, offset=i * itemsize,
                                     datatype=ros_dtype, count=1)
              for i, n in enumerate('xyzrgba')]
    header = std_msgs.Header(frame_id='world_ned', stamp=rospy.Time.now())
    pc2 = sensor_msgs.PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=(itemsize * 7),
        row_step=(itemsize * 7 * points.shape[0]),
        data=data)

    # Publish the particle positions as a point cloud
    self.point_cloud.publish(pc2)

    # Publish the estimated target position
    point = PointStamped()
    point.header.stamp = rospy.Time.now()
    point.header.frame_id = "world_ned"
    point.point.x = target.pf._x[0]
    point.point.y = target.pf._x[2]
    self.p_estimated_target_position.publish(point)
    return
def __init__(self, cam):
    self.cam = cam
    # Initialize the sphere position estimation node
    rospy.init_node("sphehre_position_estimation_xz", anonymous=True)
    self.h = msg.Header()
    self.h.stamp = rospy.Time.now()
    # Initialize a publisher to send the estimated sphere position to a
    # topic named sphere_xz
    self.image_pub1 = rospy.Publisher("sphere_xz", String, queue_size=1)
    # Initialize a subscriber to receive messages from the topic
    # /camera2/robot/image_raw and use a callback function to receive data
    self.image_sub1 = rospy.Subscriber("/camera2/robot/image_raw", Image,
                                       self.callback2)
    # Initialize the bridge between OpenCV and ROS
    self.bridge = CvBridge()
def publish_forcetorque(self):
    ft_w = self.domain.forcetorque_measurement
    f_w = ft_w[0:2]
    # This is the force/torque measured in the physics simulation frame.
    # However, in real life the sensor is mounted on the grasp frame,
    # so project the force into the rotated frame.
    a = self.domain.dynamics.grasp_body.angle
    m = tf.transformations.rotation_matrix(-a, [0, 0, 1])[0:2, 0:2]
    f_g = np.dot(m, f_w)
    f = geom_msg.Vector3(f_g[0], f_g[1], 0)
    t = geom_msg.Vector3(0, 0, ft_w[2])
    ws = geom_msg.WrenchStamped(
        std_msg.Header(None, self.get_domain_sim_time(), "grasp"),
        geom_msg.Wrench(f, t))
    self.forcetorque_publisher.publish(ws)
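# A minimal sketch of the frame projection above, assuming tf is
# importable: the 2x2 block of rotation_matrix(-a, z) rotates world-frame
# vectors by -a, i.e. expresses them in a frame itself rotated by +a.
import numpy as np
import tf.transformations

a = np.pi / 2  # grasp frame rotated 90 degrees from world
m = tf.transformations.rotation_matrix(-a, [0, 0, 1])[0:2, 0:2]
f_w = np.array([1.0, 0.0])  # world-frame force along +x
assert np.allclose(np.dot(m, f_w), [0.0, -1.0])  # -y in the grasp frame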
def chat():
    pub = rospy.Publisher('chatter', Chat, queue_size=10)
    rospy.Subscriber('chatter', Chat, callback)
    name = raw_input('What is your username? ')
    rospy.init_node(name)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        hello_str = raw_input('Type new message below: \n')
        header = msg.Header()
        header.stamp = rospy.Time.now()
        source_id = msg.String(rospy.get_name())
        message = msg.String(hello_str)
        c = Chat(header, source_id, message)
        push_data(log, c)
        pub.publish(c)
        print_log(log)
        rate.sleep()
def point_cloud(points, parent_frame, has_rgb=False):
    """Creates a point cloud message.
    Args:
        points: Nx6 array of xyz positions (m) and rgb colors (0..1) when
            has_rgb is True, otherwise an Nx3 array of xyz positions
        parent_frame: frame in which the point cloud is defined
    Returns:
        sensor_msgs/PointCloud2 message
    """
    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize
    data = points.astype(dtype).tobytes()

    channels = 'xyzrgb'
    steps = 6
    if not has_rgb:
        channels = 'xyz'
        steps = 3
    pts_num = points.shape[0]

    fields = [
        sensor_msgs.PointField(name=n, offset=i * itemsize,
                               datatype=ros_dtype, count=1)
        for i, n in enumerate(channels)
    ]
    header = std_msgs.Header(frame_id=parent_frame, stamp=rospy.Time.now())
    # print(f"datasize {len(points.astype(dtype).tobytes())} pts_num {pts_num} point_step {itemsize * steps}")
    return sensor_msgs.PointCloud2(header=header,
                                   height=1,
                                   width=pts_num,
                                   is_dense=False,
                                   is_bigendian=False,
                                   fields=fields,
                                   point_step=(itemsize * steps),
                                   row_step=(itemsize * steps * pts_num),
                                   data=data)
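# A hedged usage sketch: the has_rgb flag switches between the 3- and
# 6-channel layouts, so the array's column count must match. Assumes an
# initialised rospy node (for rospy.Time.now()) and the imports above.
xyz_cloud = point_cloud(np.random.rand(50, 3), 'map')
rgb_cloud = point_cloud(np.random.rand(50, 6), 'map', has_rgb=True)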
def publishWalls():
    cloudPoints = []
    if wallLeft > 0:
        cloudPoints.append([0.5, -13.0 / wallLeft, 0.5])
        # print("Object on Left")
    if wallRight > 0:
        cloudPoints.append([0.5, 13.0 / wallRight, 0.5])
        # print("Object on Right")
    if wallFrontLeft > 0:
        cloudPoints.append([13.0 / wallFrontLeft, 0.5, 0.5])
        # print("Object at Front")
    if wallFrontRight > 0:
        cloudPoints.append([13.0 / wallFrontRight, -0.5, 0.5])
        # print("Object at Front")

    header = stdMsg.Header()
    header.stamp = ros.Time.now()
    header.frame_id = 'world'
    myPointCloud = pcl2.create_cloud_xyz32(header, cloudPoints)

    t = geoMsg.TransformStamped()
    t.header.stamp = ros.Time.now()
    t.header.frame_id = 'world'
    t.child_frame_id = 'zumo'
    t.transform.translation.x = transX
    t.transform.translation.y = transY
    t.transform.translation.z = transZ
    t.transform.rotation.x = rotX
    t.transform.rotation.y = rotY
    t.transform.rotation.z = rotZ
    t.transform.rotation.w = rotW

    cloudOut = doTransformCloud(myPointCloud, t)
    pclPub.publish(cloudOut)
def chooseDestination(plans, copLoc, robLoc, pastCopLoc):
    # Create a costmap then sum the cost of poses along the path, OR just
    # calculate the cost of the points and sum that up.
    # cost equation = (distance from max_cost point)
    #                 + 1/2 * (how much the cop is looking at that point)
    #                 + (if the cop is moving toward that point)
    cop_pos = copLoc.pose.position
    cop_quat = copLoc.pose.orientation
    cop_euler = tf.transformations.euler_from_quaternion(
        [cop_quat.x, cop_quat.y, cop_quat.z, cop_quat.w])

    # Max costmap area is .5 meter in front of the cop. Yaw is the third
    # euler component; the original indexed [0] and [1] here
    max_cost_distance = .5
    max_cost_pos = geo_msgs.PoseStamped(
        std_msgs.Header(),
        geo_msgs.Pose(
            geo_msgs.Point(
                cop_pos.x + max_cost_distance * math.cos(cop_euler[2]),
                cop_pos.y + max_cost_distance * math.sin(cop_euler[2]), 0),
            geo_msgs.Quaternion(0, 0, 0, 1)))

    min_cost = 100000
    min_cost_location = ""

    # Evaluate each object's plan
    for object_name, plan in plans.items():
        pathCost = evaluatePath(plan, copLoc, robLoc, pastCopLoc, max_cost_pos)
        if pathCost < min_cost:
            min_cost = pathCost
            min_cost_location = object_name
        # print(pathCost)

    return min_cost, min_cost_location
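# A minimal sketch of the "0.5 m in front of the cop" geometry above,
# assuming tf is importable: yaw is the third euler component, and a cop
# at the origin facing +y (yaw 90 degrees) projects to roughly (0, 0.5).
import math
import tf.transformations

q = tf.transformations.quaternion_from_euler(0, 0, math.pi / 2)
yaw = tf.transformations.euler_from_quaternion(q)[2]
ahead = (0.5 * math.cos(yaw), 0.5 * math.sin(yaw))
assert abs(ahead[0]) < 1e-9 and abs(ahead[1] - 0.5) < 1e-9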
def point_cloud(points, parent_frame):
    """Creates a point cloud message.
    Args:
        points: Nx5 array of xyz positions (m), intensity and velocity
        parent_frame: frame in which the point cloud is defined
    Returns:
        sensor_msgs/PointCloud2 message
    """
    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize
    data = points.astype(dtype).tobytes()
    fields = [
        sensor_msgs.PointField('x', 0, ros_dtype, 1),
        sensor_msgs.PointField('y', itemsize, ros_dtype, 1),
        sensor_msgs.PointField('z', 2 * itemsize, ros_dtype, 1),
        sensor_msgs.PointField('intensity', 3 * itemsize, ros_dtype, 1),
        sensor_msgs.PointField('velocity', 4 * itemsize, ros_dtype, 1)]
    header = std_msgs.Header(frame_id=parent_frame, stamp=rospy.Time.now())
    return sensor_msgs.PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],  # number of points in the frame
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=(itemsize * 5),
        row_step=(itemsize * 5 * points.shape[0]),
        data=data)
from sensor_msgs.msg import Image, CameraInfo
import std_msgs.msg as std

image_size = 400
freq_rate = 0.2
mode = "none"  # alternatives: "stack", "overlay"

R = np.array([[0.9999756813050, 0.004391118884090, 0.005417240317910],
              [-0.0043891328387, 0.999990284443000, -0.000378685304895],
              [-0.0054188510403, 0.000354899209924, 0.999985337257000]])
T = np.array([-0.0641773045063, 0.000311704527121, -4.76178320241e-06])

bridge = CvBridge()
frame_mutex = Lock()
msg_info = CameraInfo()
msg_header = std.Header()


def callback_img1(data):
    global img1_data
    img1_data = bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")


def callback_img2(data):
    global img2_data
    img2_data = bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")


def callback_info1(data):
    global K_left, D_left, time_diff
    if data.header.stamp.to_sec() < 1581534822:
def image_publisher():
    rospy.init_node('video_to_image', anonymous=True)
    mono_pub = rospy.Publisher("/video/mono/image", Image, queue_size=10)
    mono_raw_pub = rospy.Publisher("/video/mono/image_raw", Image, queue_size=10)
    mono_info_pub = rospy.Publisher("/video/mono/camera_info", CameraInfo, queue_size=10)
    depth_pub = rospy.Publisher("/video/depth/image", Image, queue_size=10)
    depth_raw_pub = rospy.Publisher("/video/depth/image_raw", Image, queue_size=10)
    depth_info_pub = rospy.Publisher("/video/depth/camera_info", CameraInfo, queue_size=10)
    swreg_pub = rospy.Publisher("/video/swreg/image", Image, queue_size=10)
    swreg_raw_pub = rospy.Publisher("/video/swreg/image_raw", Image, queue_size=10)
    swreg_info_pub = rospy.Publisher("/video/swreg/camera_info", CameraInfo, queue_size=10)

    filename = sys.argv[1]
    video = cv2.VideoCapture(filename)
    if not video.isOpened():
        print "Error: Can't open: " + str(filename)
        exit(0)
    print "Correctly opened, starting to publish."

    fps = video.get(cv2.CAP_PROP_FPS)
    rate = rospy.Rate(fps)
    count_max = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    bridge = CvBridge()
    header_msg = std.Header()
    header_msg.frame_id = "/video_link"
    # The original built rgb_info here but referenced an undefined
    # mono_info everywhere below, so use mono_info consistently
    mono_info = info_param()
    depth_info = info_param()
    swreg_info = info_param()

    success, frame = video.read()
    count = 1
    while success and not rospy.is_shutdown():
        # Crop rows cut_idx[0]:cut_idx[1] and columns cut_idx[2]:cut_idx[3]
        # (the original sliced cut_idx[1]:cut_idx[1], an empty image)
        rgb_img = frame[cut_idx[0]:cut_idx[1], cut_idx[2]:cut_idx[3]]
        mono_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
        mono_filt_img = cv2.GaussianBlur(mono_img, (23, 23), 0)
        depth_img = 100.0 / (mono_filt_img + 100.0 / d_max)
        depth_raw_img = 1000.0 * depth_img

        mono = bridge.cv2_to_imgmsg(np.uint8(mono_img), encoding="mono8")
        # 16UC1 needs uint16 data (the original passed uint8)
        mono_raw = bridge.cv2_to_imgmsg(np.uint16(mono_img), encoding="16UC1")
        depth = bridge.cv2_to_imgmsg(np.float32(depth_img), encoding="32FC1")
        depth_raw = bridge.cv2_to_imgmsg(np.uint16(depth_raw_img), encoding="16UC1")
        swreg = bridge.cv2_to_imgmsg(np.float32(depth_img), encoding="32FC1")
        swreg_raw = bridge.cv2_to_imgmsg(np.uint16(depth_raw_img), encoding="16UC1")

        header_msg.stamp = rospy.Time.now()
        header_msg.seq = count
        mono.header = header_msg
        mono_raw.header = header_msg
        mono_info.header = header_msg
        depth.header = header_msg
        depth_raw.header = header_msg
        depth_info.header = header_msg
        swreg.header = header_msg
        swreg_raw.header = header_msg
        swreg_info.header = header_msg

        mono_pub.publish(mono)
        mono_raw_pub.publish(mono_raw)
        # the original called mono_info.publish(mono_info) here
        mono_info_pub.publish(mono_info)
        depth_pub.publish(depth)
        depth_raw_pub.publish(depth_raw)
        depth_info_pub.publish(depth_info)
        swreg_pub.publish(swreg)
        swreg_raw_pub.publish(swreg_raw)
        swreg_info_pub.publish(swreg_info)
        print str(count) + "/" + str(count_max)

        rate.sleep()
        success, frame = video.read()
        count += 1
class RoombaSensorHandler(object):
    ROOMBA_PULSES_TO_M = 0.000445558279992234

    def __init__(self, robot):
        self._robot = robot
        self._sensor_state_struct = struct.Struct(
            ">12B2hBHhb7HBH5B4h2HB6H2B4hb")
        self._last_encoder_counts = None

    def _request_packet(self, packet_id):
        """Request a sensor packet."""
        with self._robot.sci.lock:
            self._robot.sci.flush_input()
            self._robot.sci.sensors(packet_id)
            # kwc: there appears to be a 10-20ms latency between sending the
            # sensor request and fully reading the packet. Based on
            # observation, we are preferring the 'before' stamp rather than
            # after.
            stamp = time.time()
            length = create_driver.SENSOR_GROUP_PACKET_LENGTHS[packet_id]
            return self._robot.sci.read(length), stamp

    def _deserialize(self, buffer, timestamp):
        try:
            (self.bumps_wheeldrops, self.wall, self.cliff_left,
             self.cliff_front_left, self.cliff_front_right, self.cliff_right,
             self.virtual_wall, self.motor_overcurrents,
             self.dirt_detector_left, self.dirt_detector_right,
             self.remote_opcode, self.buttons, self.distance, self.angle,
             self.charging_state, self.voltage, self.current,
             self.temperature, self.charge, self.capacity, self.wall_signal,
             self.cliff_left_signal, self.cliff_front_left_signal,
             self.cliff_front_right_signal, self.cliff_right_signal,
             self.user_digital_inputs, self.user_analog_input,
             self.charging_sources_available, self.oi_mode, self.song_number,
             self.song_playing, self.number_of_stream_packets,
             self.requested_velocity, self.requested_radius,
             self.requested_right_velocity, self.requested_left_velocity,
             self.encoder_counts_left, self.encoder_counts_right,
             self.light_bumper, self.light_bump_left,
             self.light_bump_front_left, self.light_bump_center_left,
             self.light_bump_center_right, self.light_bump_front_right,
             self.light_bump_right, self.ir_opcode_left, self.ir_opcode_right,
             self.left_motor_current, self.right_motor_current,
             self.main_brish_current, self.side_brush_current,
             self.statis) = self._sensor_state_struct.unpack(buffer[0:80])
        except struct.error, e:
            raise roslib.message.DeserializationError(e)

        self.wall = bool(self.wall)
        self.cliff_left = bool(self.cliff_left)
        self.cliff_front_left = bool(self.cliff_front_left)
        self.cliff_front_right = bool(self.cliff_front_right)
        self.cliff_right = bool(self.cliff_right)
        self.virtual_wall = bool(self.virtual_wall)
        self.song_playing = bool(self.song_playing)

        # Do unit conversions
        self.header = std_msgs.Header(stamp=rospy.Time.from_seconds(timestamp))
        self.requested_velocity = float(self.requested_velocity) / 1000.
        self.requested_radius = float(self.requested_radius) / 1000.
        self.requested_right_velocity = float(
            self.requested_right_velocity) / 1000.
        self.requested_left_velocity = float(
            self.requested_left_velocity) / 1000.

        # The distance and angle calculation sent by the robot seems to
        # be really bad. Re-calculate the values using the raw encoder
        # counts.
        if self._last_encoder_counts:
            count_delta_left = self._normalize_encoder_count(
                self.encoder_counts_left - self._last_encoder_counts[0],
                0xffff)
            count_delta_right = self._normalize_encoder_count(
                self.encoder_counts_right - self._last_encoder_counts[1],
                0xffff)
            distance_left = count_delta_left * self.ROOMBA_PULSES_TO_M
            distance_right = count_delta_right * self.ROOMBA_PULSES_TO_M
            self.distance = (distance_left + distance_right) / 2.0
            self.angle = (
                distance_right - distance_left
            ) / robot_types.ROBOT_TYPES['roomba'].wheel_separation
        else:
            self.distance = 0  # the original assigned a misspelled "disance"
            self.angle = 0
        self._last_encoder_counts = (self.encoder_counts_left,
                                     self.encoder_counts_right)
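# A minimal sketch of the big-endian struct decoding used by _deserialize
# above, on a hypothetical 2-field ">Bh" layout (stdlib only):
import struct

s = struct.Struct(">Bh")  # unsigned byte + signed 16-bit, big-endian
raw = bytes(bytearray([0x01, 0xff, 0x38]))
flag, value = s.unpack(raw)
assert flag == 1 and value == -200  # 0xff38 as signed big-endian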