def calculate_mk2_ik(self):
    #goal_point = Point(0.298, -0.249, -0.890) # home position
    goal_pose = Pose()

    # Go to position 1
    goal_point = Point(0.298, -0.249, -0.890)
    #goal_point = Point(0.298, -0.5, -1.0)
    goal_pose.position = goal_point
    quat = quaternion_from_euler(0.0, 0.0, 0.0)  # roll, pitch, yaw
    goal_pose.orientation = Quaternion(*quat.tolist())
    moveit_goal = self.create_move_group_pose_goal(goal_pose, group="manipulator",
                                                   end_link_name="link_7", plan_only=False)
    rospy.loginfo("Sending goal...")
    self.moveit_ac.send_goal(moveit_goal)
    rospy.loginfo("Waiting for result...")
    self.moveit_ac.wait_for_result(rospy.Duration(10.0))
    moveit_result = self.moveit_ac.get_result()

    # Go to position 2
    goal_point = Point(0.305, -0.249, -1.05)
    goal_pose.position = goal_point
    quat = quaternion_from_euler(0.0, 0.0, 0.0)  # roll, pitch, yaw
    goal_pose.orientation = Quaternion(*quat.tolist())
    moveit_goal = self.create_move_group_pose_goal(goal_pose, group="manipulator",
                                                   end_link_name="link_7", plan_only=False)
    rospy.loginfo("Sending goal...")
    self.moveit_ac.send_goal(moveit_goal)
    rospy.loginfo("Waiting for result...")
    self.moveit_ac.wait_for_result(rospy.Duration(10.0))
    moveit_result = self.moveit_ac.get_result()
def update(self):
    # protected region updateCode on begin #
    in_CameraDetections = copy.deepcopy(self.in_CameraDetections)

    # check if a detection is available
    if len(in_CameraDetections.poses) <= 0:
        return

    # transform from pixel coords to camera_base_link coords in meters
    out1_CameraDetections = PoseArray()
    out1_CameraDetections.header = in_CameraDetections.header
    for pose in in_CameraDetections.poses:
        new_pose = Pose()
        new_pose.position.x = (pose.position.x - self.config_ResolutionX/2.0) * self.config_MeterPerPixel
        new_pose.position.y = (pose.position.y - self.config_ResolutionY/2.0) * self.config_MeterPerPixel
        new_pose.position.z = 0.0
        new_pose.orientation = pose.orientation
        out1_CameraDetections.poses.append(new_pose)

    # transform from camera_base_link to robot base_link
    out_CameraDetections = PoseArray()
    out_CameraDetections.header = out1_CameraDetections.header
    for pose in out1_CameraDetections.poses:
        new_pose = Pose()
        new_pose.position.x = self.camera_base_link_offset_X + pose.position.x
        new_pose.position.y = self.camera_base_link_offset_Y - pose.position.y
        new_pose.position.z = 0.0
        new_pose.orientation = pose.orientation  # TODO: rotate 180deg around x-axis
        out_CameraDetections.poses.append(new_pose)

    self.out_CameraDetections = out_CameraDetections
    # protected region updateCode end #
    pass
def __init__(self, from_frame, to_frame, position, orientation):
    self.server = InteractiveMarkerServer("posestamped_im")
    o = orientation
    r, p, y = euler_from_quaternion([o.x, o.y, o.z, o.w])
    rospy.loginfo("Publishing transform and PoseStamped from: " +
                  from_frame + " to " + to_frame +
                  " at " + str(position.x) + " " + str(position.y) + " " + str(position.z) +
                  " and orientation " + str(o.x) + " " + str(o.y) + " " +
                  str(o.z) + " " + str(o.w) + " or rpy " +
                  str(r) + " " + str(p) + " " + str(y))
    self.menu_handler = MenuHandler()
    self.from_frame = from_frame
    self.to_frame = to_frame
    # Transform broadcaster
    self.tf_br = tf.TransformBroadcaster()

    self.pub = rospy.Publisher('/posestamped', PoseStamped, queue_size=1)
    rospy.loginfo("Publishing posestampeds at topic: " + str(self.pub.name))
    pose = Pose()
    pose.position = position
    pose.orientation = orientation
    self.last_pose = pose
    self.print_commands(pose)
    self.makeGraspIM(pose)

    self.server.applyChanges()
def processFeedback(feedback):
    #p = feedback.pose.position
    #print feedback.marker_name + " is now at " + str(p.x) + ", " + str(p.y) + ", " + str(p.z)
    pub_pose.publish(feedback.pose)
    if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:
        pub_event.publish(event_on_click)
        rospy.loginfo(": button click.")
    elif feedback.event_type == InteractiveMarkerFeedback.MENU_SELECT:
        rospy.loginfo(": menu item " + str(feedback.menu_entry_id) + " clicked")
        print(feedback.menu_entry_id)
        if feedback.menu_entry_id == 1:
            rospy.loginfo(": reset pose")
            p = feedback.pose
            p.orientation = starting_pose.orientation
            server.setPose(feedback.marker_name, p)
            server.applyChanges()
        elif feedback.menu_entry_id == 2:
            rospy.loginfo(": reset orientation")
            server.setPose(feedback.marker_name, starting_pose)
            server.applyChanges()
        if feedback.menu_entry_id == 3:
            print(feedback.menu_entry_id)
            pub_event.publish(event_on_click)
    elif feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:
        rospy.loginfo(": new Pose is " + str(feedback.pose))
def free_spots_on_table(table, obj, orientation, blocking_objs, res=0.1):
    object_pose = Pose()
    object_pose.orientation = orientation
    # these are the corners of the object in the object's frame
    object_frame_corners = gt.bounding_box_corners(obj.shapes[0])
    #rospy.loginfo('Corners are')
    #for c in object_frame_corners: rospy.loginfo(str(c))
    # these are the corners after we apply the rotation
    header_frame_corners = [gt.transform_list(c, object_pose) for c in object_frame_corners]
    # these are the corners relative to the table's lower left corner
    table_frame_corners = [gt.inverse_transform_list(c, table.poses[0]) for c in header_frame_corners]
    # the footprint of the object above the table
    oxmax = max([c[0] for c in table_frame_corners])
    oxmin = min([c[0] for c in table_frame_corners])
    oymax = max([c[1] for c in table_frame_corners])
    oymin = min([c[1] for c in table_frame_corners])
    object_dims = (oxmax - oxmin, oymax - oymin)
    rospy.loginfo('Object dimensions are ' + str(object_dims))
    obj_poses = free_spots_from_dimensions(table, object_dims, blocking_objs, res=res)
    # return these as proper object poses with the object sitting on the table;
    # the height of the object in the world in its placed orientation
    obj_height = -1.0 * min([c[2] for c in header_frame_corners])
    above_table = gt.transform_list([0, 0, obj_height], table.poses[0])
    place_height = above_table[2]
    for pose in obj_poses:
        pose.pose.position.z = place_height
        pose.pose.orientation = copy.copy(orientation)
    return obj_poses
def particle_to_pose(particle):
    ''' Converts a particle in the form [x, y, theta] into a Pose object '''
    pose = Pose()
    pose.position.x = particle[0]
    pose.position.y = particle[1]
    pose.orientation = angle_to_quaternion(particle[2])
    return pose
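# For reference, a minimal sketch of the angle_to_quaternion helper that the
# function above assumes. The helper name comes from the call site; its exact
# behavior (yaw-only rotation) is an assumption, built on
# tf.transformations.quaternion_from_euler.
from geometry_msgs.msg import Quaternion
from tf.transformations import quaternion_from_euler

def angle_to_quaternion(angle):
    # Assumed helper: pack a yaw-only rotation into a geometry_msgs Quaternion.
    return Quaternion(*quaternion_from_euler(0.0, 0.0, angle))

# Hypothetical usage: a particle at x=1.0, y=2.0 facing ~90 degrees.
# pose = particle_to_pose([1.0, 2.0, 1.5708])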
def __init__(self, nodes):
    self.node_zone = MarkerArray()
    for node in nodes.nodes:
        marker = Marker()
        marker.header.frame_id = "/map"
        marker.type = marker.LINE_STRIP
        marker.scale.x = 0.1
        marker.color.a = 0.8
        marker.color.r = 0.7
        marker.color.g = 0.1
        marker.color.b = 0.2
        node._get_coords()
        count = 0
        for i in node.vertices:
            #print i[0], i[1]
            Vert = Point()
            Vert.z = 0.05
            Vert.x = node.px + i[0]
            Vert.y = node.py + i[1]
            marker.points.append(Vert)
            #vertname = "%s-%d" % (node.name, count)
            Pos = Pose()
            Pos.position = Vert
            # NOTE: this still has to be done somehow
            #self._vertex_marker(vertname, Pos, vertname)
            count += 1
        marker.points.append(marker.points[0])
        self.node_zone.markers.append(marker)

    idn = 0
    for m in self.node_zone.markers:
        m.id = idn
        idn += 1
def poseFromTransform(cls, t):
    p = Pose()
    p.position.x = t.translation.x
    p.position.y = t.translation.y
    p.position.z = t.translation.z
    p.orientation = deepcopy(t.rotation)
    return p
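# A hypothetical usage sketch for the converter above. The cls parameter
# suggests it is a @classmethod on some conversion helper class; the class
# name used here is an assumption, not from the source.
from geometry_msgs.msg import Transform

t = Transform()
t.translation.x = 0.5
t.rotation.w = 1.0  # identity rotation
# pose = PoseConversions.poseFromTransform(t)  # class name is hypothetical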
def add_collision_cluster(self, cluster, object_id):
    '''
    Adds a point cloud to the collision map as a single collision object
    composed of many small boxes.

    **Args:**

        **cluster (sensor_msgs.msg.PointCloud):** The point cloud to add to the map

        **object_id (string):** The name the point cloud should have in the map
    '''
    many_boxes = CollisionObject()
    many_boxes.operation.operation = many_boxes.operation.ADD
    many_boxes.header = cluster.header
    many_boxes.header.stamp = rospy.Time.now()
    num_to_use = int(len(cluster.points) / 100.0)
    random_indices = range(len(cluster.points))
    random.shuffle(random_indices)
    random_indices = random_indices[0:num_to_use]
    for i in range(num_to_use):
        shape = Shape()
        shape.type = Shape.BOX
        shape.dimensions = [.005] * 3
        pose = Pose()
        pose.position.x = cluster.points[random_indices[i]].x
        pose.position.y = cluster.points[random_indices[i]].y
        pose.position.z = cluster.points[random_indices[i]].z
        pose.orientation = Quaternion(*[0, 0, 0, 1])
        many_boxes.shapes.append(shape)
        many_boxes.poses.append(pose)
    many_boxes.id = object_id
    self._publish(many_boxes, self._collision_object_pub)
def main(argv):
    x = float(sys.argv[1])
    y = float(sys.argv[2])
    th = float(sys.argv[3])
    print("x: %.3f y: %.3f Th: %.1f" % (x, y, th))  # print 2D pose data to terminal
    time.sleep(0.5)

    # publish pose data to ros topic
    msg = Pose()
    q = tf.transformations.quaternion_from_euler(0, 0, th)
    msg.position = Point(x, y, 0)
    msg.orientation.x = q[0]
    msg.orientation.y = q[1]
    msg.orientation.z = q[2]
    msg.orientation.w = q[3]
    pose_pub.publish(msg)

    key_pressed = False
    while key_pressed == False:
        key_pressed = select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])  # is a key pressed?
        time.sleep(0.5)
    sys.exit()
def follow_pose_with_admitance_by_ft(self):
    fx, fy, fz = self.get_force_movement()
    rospy.loginfo("fx, fy, fz: " + str((round(fx, 3), round(fy, 3), round(fz, 3))))
    ps = Pose()
    if abs(fx) > self.fx_deadband:
        ps.position.x = (fx * self.fx_scaling) * self.frame_fixer.fx
    if abs(fy) > self.fy_deadband:
        ps.position.y = (fy * self.fy_scaling) * self.frame_fixer.fy
    if abs(fz) > self.fz_deadband:
        ps.position.z = (fz * self.fz_scaling) * self.frame_fixer.fz

    tx, ty, tz = self.get_torque_movement()
    rospy.loginfo("tx, ty, tz: " + str((round(tx, 3), round(ty, 3), round(tz, 3))))
    roll = pitch = yaw = 0.0
    if abs(tx) > self.tx_deadband:
        roll += (tx * self.tx_scaling) * self.frame_fixer.tx
    if abs(ty) > self.ty_deadband:
        pitch += (ty * self.ty_scaling) * self.frame_fixer.ty
    if abs(tz) > self.tz_deadband:
        yaw += (tz * self.tz_scaling) * self.frame_fixer.tz

    q = quaternion_from_euler(roll, pitch, yaw)
    ps.orientation = Quaternion(*q)

    o = self.last_pose_to_follow.pose.orientation
    r_lastp, p_lastp, y_lastp = euler_from_quaternion([o.x, o.y, o.z, o.w])

    final_roll = r_lastp + roll
    final_pitch = p_lastp + pitch
    final_yaw = y_lastp + yaw
    self.current_pose.pose.orientation = Quaternion(
        *quaternion_from_euler(final_roll, final_pitch, final_yaw))

    self.current_pose.pose.position.x = self.last_pose_to_follow.pose.position.x + ps.position.x
    self.current_pose.pose.position.y = self.last_pose_to_follow.pose.position.y + ps.position.y
    self.current_pose.pose.position.z = self.last_pose_to_follow.pose.position.z + ps.position.z

    self.current_pose.pose.position.x = self.sanitize(self.current_pose.pose.position.x,
                                                      self.min_x, self.max_x)
    self.current_pose.pose.position.y = self.sanitize(self.current_pose.pose.position.y,
                                                      self.min_y, self.max_y)
    self.current_pose.pose.position.z = self.sanitize(self.current_pose.pose.position.z,
                                                      self.min_z, self.max_z)
    self.current_pose.header.stamp = rospy.Time.now()

    if self.send_goals:  # send MODIFIED GOALS
        self.pose_pub.publish(self.current_pose)
    else:
        self.last_pose_to_follow.header.stamp = rospy.Time.now()
        self.pose_pub.publish(self.last_pose_to_follow)
    self.debug_pose_pub.publish(self.current_pose)
def project(self, p, radius, width, height):
    #print p
    #print width
    #print height
    #print "radius"
    #print radius
    xFOV = 63.38
    yFOV = 48.25
    cx = width / 2
    cy = height / 2
    fx = cx / np.tan((xFOV / 2) * np.pi / 180)
    fy = cy / np.tan((yFOV / 2) * np.pi / 180)

    toball = np.zeros(3)
    toball[0] = (p[0] - cx) / fx
    toball[1] = -(p[1] - cy) / fy
    toball[2] = 1
    toball = toball / np.linalg.norm(toball)  # normalize so we can then multiply by distance
    distance = self.pixel_radius / radius
    toball = toball * distance

    pose = Pose()
    pose.position = Point(toball[0], toball[1], toball[2])
    pose.orientation = Quaternion(0, 0, 0, 1)
    #print "FOUND ORANGE BALL!!!!"
    #print toball
    return pose
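# The function above is a standard pinhole back-projection: focal lengths in
# pixels come from the field of view as f = (resolution/2) / tan(FOV/2), and
# depth is inferred from the ratio of a calibrated pixel radius to the
# observed radius. A standalone numeric sketch follows; the FOV values mirror
# the function above, while the resolution, pixel_radius calibration constant,
# and pixel coordinates are made-up assumptions for illustration.
import numpy as np

width, height = 640, 480
xFOV, yFOV = 63.38, 48.25  # degrees, as in the function above
cx, cy = width / 2.0, height / 2.0
fx = cx / np.tan(np.radians(xFOV / 2.0))
fy = cy / np.tan(np.radians(yFOV / 2.0))

pixel_radius = 20.0     # assumed: apparent radius in pixels at 1 m
observed_radius = 10.0  # ball appears half as large -> roughly 2 m away

ray = np.array([(400 - cx) / fx, -(200 - cy) / fy, 1.0])
ray /= np.linalg.norm(ray)              # unit ray through the pixel
point = ray * (pixel_radius / observed_radius)  # scale by estimated distance
print(point)  # camera-frame coordinates of the ball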
def process_collision_geometry_for_cluster(self, cluster):
    rospy.loginfo("adding cluster with %d points to collision map" % len(cluster.points))
    many_boxes = CollisionObject()
    many_boxes.operation.operation = CollisionObjectOperation.ADD
    many_boxes.header = cluster.header
    many_boxes.header.stamp = rospy.Time.now()
    num_to_use = int(len(cluster.points) / 100.0)
    random_indices = range(len(cluster.points))
    scipy.random.shuffle(random_indices)
    random_indices = random_indices[0:num_to_use]
    for i in range(num_to_use):
        shape = Shape()
        shape.type = Shape.BOX
        shape.dimensions = [.005] * 3
        pose = Pose()
        pose.position.x = cluster.points[random_indices[i]].x
        pose.position.y = cluster.points[random_indices[i]].y
        pose.position.z = cluster.points[random_indices[i]].z
        pose.orientation = Quaternion(*[0, 0, 0, 1])
        many_boxes.shapes.append(shape)
        many_boxes.poses.append(pose)

    collision_name = self.get_next_object_name()
    many_boxes.id = collision_name
    self.object_in_map_pub.publish(many_boxes)
    return collision_name
def detect_faces_cb_ros_pkg(userdata, status, result):
    '''Detects faces using the ROS package, then checks the list of people who
    are not able to walk. If a detected face is close to someone on that list,
    it is popped from the list. The first person closer than
    ---closestPersonDistance--- is written to userdata.'''
    LOCATION_LIST_DISTANCE = 1
    print "CallBack is called"
    people = result.face_positions
    # pose_in_map = transform_pose(people.pos, people.header.frame_id, "/map")
    if userdata.location_list:
        for person in people:
            pose_in_stereo = Pose()
            pose_in_stereo.position = person.pos
            pose_in_map = transform_pose(pose_in_stereo, userdata.message.header.frame_id, "/map")
            for not_walking in userdata.location_list:
                distance_from_list = ((pose_in_map.position.x - not_walking[0])**2 +
                                      (pose_in_map.position.y - not_walking[1])**2)
                if distance_from_list < LOCATION_LIST_DISTANCE:
                    people.pop(people.index(person))

    closestPerson = None
    closestPersonDistance = sys.maxint
    for person in people:
        dist = person.pos.z
        if dist < closestPersonDistance:
            closestPerson = person
            closestPersonDistance = dist
    # FIXME: blacklist already seen people
    userdata.closest_person = closestPerson
    print "Closest person (from find_person_with_visit_checker): ", closestPerson
    return succeeded if closestPerson else aborted
def eef_pose_pub():
    rospy.init_node('eef_publisher')
    rospy.loginfo("Publishing right EEF gripper location")
    listener = tf.TransformListener()
    pub = rospy.Publisher('eef_pose', Pose, queue_size=10)

    DEFAULT_LINK = '/right_ee_link'
    DEFAULT_RATE = 100
    # Pull the rate and EEF link from the param server
    eef_link = rospy.get_param("~eef_link", DEFAULT_LINK)
    publish_rate = rospy.get_param("~eef_rate", DEFAULT_RATE)

    rate = rospy.Rate(publish_rate)
    while not rospy.is_shutdown():
        try:
            trans, rot = listener.lookupTransform('/base_link', eef_link, rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue
        msg = Pose()
        msg.position = Point()
        msg.position.x, msg.position.y, msg.position.z = trans[0], trans[1], trans[2]
        msg.orientation = Quaternion()
        msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w = \
            rot[0], rot[1], rot[2], rot[3]
        pub.publish(msg)
        rate.sleep()
def callback_Distance(data):
    #print data
    global i_distance
    m = marker_array.markers[markers_list[id_]]
    m.header.stamp = rospy.Time.now()
    m.action = 0
    m.type = m.SPHERE
    p = Pose()
    #print data.data[0], data.data[1], data.data[2]
    p.position = Point(data.data[0], data.data[1], data.data[2])
    if data.data[3] == data.data[4]:
        scale = data.data[3]
    else:
        i_distance = i_distance + 1
        scale = data.data[3]
        if i_distance % 2 == 0:
            scale = data.data[4]
    m.pose = p
    m.scale.x = scale * 2
    m.scale.y = scale * 2
    m.scale.z = scale * 2
    m.color.r = 0
    m.color.g = 1
    m.color.b = 0
    m.color.a = 0.2
    m.lifetime = rospy.Duration(0)
    m.frame_locked = False
def homogeneous_product_pose_transform(pose, transform):
    """
    pose should be a Pose() msg
    transform is a Transform()
    """
    # 1. Get the transform's homogeneous matrix.
    Ht = tf.transformations.quaternion_matrix([transform.rotation.x,
                                               transform.rotation.y,
                                               transform.rotation.z,
                                               transform.rotation.w])
    Tt = [transform.translation.x, transform.translation.y, transform.translation.z]
    Ht[:3, 3] = Tt

    # 2. Convert the original pose to a homogeneous matrix.
    H0 = tf.transformations.quaternion_matrix([pose.orientation.x,
                                               pose.orientation.y,
                                               pose.orientation.z,
                                               pose.orientation.w])
    T0 = [pose.position.x, pose.position.y, pose.position.z]
    H0[:3, 3] = T0

    H1 = numpy.dot(H0, Ht)

    # Extract the results from the matrix
    result_position = H1[:3, 3].T
    result_position = geometry_msgs.msg.Vector3(result_position[0],
                                                result_position[1],
                                                result_position[2])
    result_quat = tf.transformations.quaternion_from_matrix(H1)
    result_quat = geometry_msgs.msg.Quaternion(result_quat[0], result_quat[1],
                                               result_quat[2], result_quat[3])
    result_pose = Pose()
    result_pose.position = result_position
    result_pose.orientation = result_quat
    return result_pose
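# Note that H1 = H0 * Ht post-multiplies, so the transform is applied in the
# pose's own local frame rather than the world frame. A minimal numeric check
# of that behavior using only tf.transformations and numpy (the example values
# are made up):
import numpy as np
import tf.transformations as tft

# A pose at x=1 yawed 90 degrees, post-multiplied by a transform that moves
# 1 m along local x: the result lands near (1, 1, 0), i.e. the step happened
# along the pose's rotated x-axis.
H0 = tft.euler_matrix(0, 0, np.pi / 2)
H0[:3, 3] = [1.0, 0.0, 0.0]
Ht = np.eye(4)
Ht[:3, 3] = [1.0, 0.0, 0.0]
H1 = np.dot(H0, Ht)
print(H1[:3, 3])  # -> approximately [1. 1. 0.]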
def get_pose_msg_from_2d(x, y, yaw):
    pose = Pose()
    pose.position.x = x
    pose.position.y = y
    pose.position.z = 0
    pose.orientation = Quaternion(*quaternion_from_euler(0, 0, yaw))
    return pose
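# A sketch of the inverse helper, going from a Pose back to (x, y, yaw). This
# helper is not in the source; it is an assumed counterpart built on
# tf.transformations.euler_from_quaternion.
from tf.transformations import euler_from_quaternion

def get_2d_from_pose_msg(pose):
    # Hypothetical inverse: extract (x, y, yaw) from a Pose message.
    o = pose.orientation
    _, _, yaw = euler_from_quaternion([o.x, o.y, o.z, o.w])
    return pose.position.x, pose.position.y, yaw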
def find_joint_pose(self, pose, targ_x=0.0, targ_y=0.0, targ_z=0.0,
                    targ_ox=0.0, targ_oy=0.0, targ_oz=0.0):
    '''
    Finds the joint positions of the arm given some pose and the
    offsets from it (to avoid opening the structure all the time
    outside of the function).
    '''
    ik_srv = "ExternalTools/right/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ik_srv, SolvePositionIK)
    ik_request = SolvePositionIKRequest()
    the_pose = deepcopy(pose)
    the_pose['position'] = Point(x=targ_x + self.baxter_off.linear.x,
                                 y=targ_y + self.baxter_off.linear.y,
                                 z=targ_z + self.baxter_off.linear.z)
    angles = tf.transformations.quaternion_from_euler(
        targ_ox + self.baxter_off.angular.x,
        targ_oy + self.baxter_off.angular.y,
        targ_oz + self.baxter_off.angular.z)
    the_pose['orientation'] = Quaternion(x=angles[0], y=angles[1],
                                         z=angles[2], w=angles[3])
    approach_pose = Pose()
    approach_pose.position = the_pose['position']
    approach_pose.orientation = the_pose['orientation']
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')
    pose_req = PoseStamped(header=hdr, pose=approach_pose)
    ik_request.pose_stamp.append(pose_req)
    resp = iksvc(ik_request)
    return dict(zip(resp.joints[0].name, resp.joints[0].position))
def __init__(self):
    smach.StateMachine.__init__(self, [succeeded, preempted, aborted])
    with self:
        intro_text = "Good! I will take it to the hanger."
        smach.StateMachine.add('INTRO', SpeakActionState(intro_text),
                               transitions={succeeded: 'MOVE_TO_HANGER'})

        # move to hanger
        self.userdata.hanger = HANGER_NAME
        smach.StateMachine.add('MOVE_TO_HANGER',
                               MoveToRoomStateMachine(announcement=None),
                               transitions={succeeded: 'APPROACH_TO_HANGER',
                                            preempted: preempted,
                                            aborted: 'MOVE_TO_HANGER'},
                               remapping={'room_name': 'hanger'})

        # Rotate ~180 degrees (note: a yaw of 3.6 rad is about 206 degrees, not 180)
        pose_turn = Pose()
        pose_turn.orientation = Quaternion(*quaternion_from_euler(0, 0, 3.6))
        #smach.StateMachine.add('MOVE_TO_HANGER',
        #                       MoveActionState(move_base_action_name='/move_by/move_base', pose=pose_turn),
        #                       transitions={succeeded: 'APPROACH_TO_HANGER',
        #                                    preempted: preempted,
        #                                    aborted: 'MOVE_TO_HANGER'},
        #                       remapping={'room_name': 'hanger'})

        smach.StateMachine.add('APPROACH_TO_HANGER',
                               SMClothHangingApproachHangerPartStateMachine(),
                               transitions={succeeded: succeeded,
                                            preempted: preempted,
                                            aborted: aborted})
def publishSphere2(self, pose, color, scale, lifetime=None):
    """
    Publish a sphere Marker. This renders a smoother, flatter-looking sphere.

    @param pose (numpy matrix, numpy ndarray, ROS Pose)
    @param color name (string) or RGB color value (tuple or list)
    @param scale (ROS Vector3, float)
    @param lifetime (float, None = never expire)
    """
    if self.muted == True:
        return True

    # Convert input pose to a ROS Pose Msg
    if (type(pose) == numpy.matrix) or (type(pose) == numpy.ndarray):
        sphere_pose = mat_to_pose(pose)
    elif type(pose) == Pose:
        sphere_pose = pose
    elif type(pose) == Point:
        pose_msg = Pose()
        pose_msg.position = pose
        sphere_pose = pose_msg
    else:
        rospy.logerr("Pose is unsupported type '%s' in publishSphere()",
                     type(pose).__name__)
        return False

    # Convert input scale to a ROS Vector3 Msg
    if type(scale) == Vector3:
        sphere_scale = scale
    elif type(scale) == float:
        sphere_scale = Vector3(scale, scale, scale)
    else:
        rospy.logerr("Scale is unsupported type '%s' in publishSphere()",
                     type(scale).__name__)
        return False

    # Increment the ID number
    self.sphere_marker.id += 1

    # Get the default parameters
    sphere_marker = self.sphere_marker2  # sphere_marker2 = SPHERE_LIST

    if lifetime == None:
        sphere_marker.lifetime = rospy.Duration(0.0)  # 0 = Marker never expires
    else:
        sphere_marker.lifetime = rospy.Duration(lifetime)  # in seconds

    # Set the timestamp
    sphere_marker.header.stamp = rospy.Time.now()

    # Set marker size
    sphere_marker.scale = sphere_scale

    # Set marker color
    sphere_marker.color = self.getColor(color)

    # Set the pose of one sphere in the list
    sphere_marker.points[0] = sphere_pose.position
    sphere_marker.colors[0] = self.getColor(color)

    return self.publishMarker(sphere_marker)
def _create_vertices_array(self):
    for node in self.topo_map.nodes:
        marker = Marker()
        marker.header.frame_id = "/map"
        marker.type = marker.LINE_STRIP
        #marker.lifetime = 2
        marker.scale.x = 0.1
        marker.color.a = 0.8
        marker.color.r = 0.7
        marker.color.g = 0.1
        marker.color.b = 0.2
        node._get_coords()
        count = 0
        for i in node.vertices:
            #print i[0], i[1]
            Vert = Point()
            Vert.z = 0.05
            Vert.x = node.px + i[0]
            Vert.y = node.py + i[1]
            marker.points.append(Vert)
            vertname = "%s-%d" % (node.name, count)
            Pos = Pose()
            Pos.position = Vert
            self._vertex_marker(vertname, Pos, vertname)
            count += 1
        marker.points.append(marker.points[0])
        self.node_zone.markers.append(marker)

    idn = 0
    for m in self.node_zone.markers:
        m.id = idn
        idn += 1
def obj_response_cb(userdata, response):
    if response.exists:
        pose = Pose()
        # Won't there be problems mixing float32 with float64? TODO
        pose.position = Point(response.base_coordinates.x, response.base_coordinates.y, 0)
        # TODO: this may give problems
        pose.orientation = Quaternion(*quaternion_from_euler(0, 0, response.base_coordinates.z))

        if MANTAIN_DISTANCE_FROM_OBJECT:
            pose = pose_at_distance(pose, DISTANCE_FROM_OBJECT)
        userdata.object_location = pose

        release_pose = ObjectPose()
        release_pose.pose = response.arm_coordinates
        userdata.object_release_location = release_pose

        print "\n\n printing pose for move to object"
        print pose
        print "\n\n printing pose for releasing the object"
        print release_pose
        print "\n that was the pose of the object\n\n"
        print userdata.object_name
        print "is the object name"
        return succeeded
    else:
        userdata.object_location = None
        return aborted
def transform_pose(pose_in, transform):
    '''
    Transforms a pose.

    Takes a pose defined in the frame defined by transform (i.e. the frame in
    which transform is the origin) and returns it in the frame in which
    transform is defined. Calling this with the origin pose will return
    transform.

    This returns

    pose.position = transform_point(pose_in.position, transform)
    pose.orientation = transform_quaternion(pose_in.orientation, transform)

    **Args:**

        **pose_in (geometry_msgs.msg.Pose):** Pose to transform

        **transform (geometry_msgs.msg.Pose):** The transform

    **Returns:**
        A geometry_msgs.msg.Pose
    '''
    pose = Pose()
    pose.position = transform_point(pose_in.position, transform)
    pose.orientation = transform_quaternion(pose_in.orientation, transform)
    return pose
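# For intuition, minimal sketches of the transform_point and
# transform_quaternion helpers this function relies on, written with
# tf.transformations. These are assumptions about the helpers' behavior that
# match the docstring's frame convention, not the library's actual code.
import tf.transformations as tft
from geometry_msgs.msg import Point, Quaternion

def transform_point(point, transform):
    # Rotate the point by the transform's orientation, then translate.
    q = [transform.orientation.x, transform.orientation.y,
         transform.orientation.z, transform.orientation.w]
    m = tft.quaternion_matrix(q)
    v = m[:3, :3].dot([point.x, point.y, point.z])
    return Point(v[0] + transform.position.x,
                 v[1] + transform.position.y,
                 v[2] + transform.position.z)

def transform_quaternion(quat, transform):
    # Compose the transform's rotation with the pose's rotation.
    q_t = [transform.orientation.x, transform.orientation.y,
           transform.orientation.z, transform.orientation.w]
    q_p = [quat.x, quat.y, quat.z, quat.w]
    return Quaternion(*tft.quaternion_multiply(q_t, q_p))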
def execute(self, userdata):
    rospy.loginfo("Creating goal to put robot in front of handle")
    pose_handle = Pose()
    if userdata.door_detection_data_in_base_link.handle_side == "left" or \
       userdata.door_detection_data_in_base_link.handle_side == "right":  # closed door
        (r, p, theta) = euler_from_quaternion((
            userdata.door_detection_data_in_base_link.door_handle.pose.orientation.x,
            userdata.door_detection_data_in_base_link.door_handle.pose.orientation.y,
            userdata.door_detection_data_in_base_link.door_handle.pose.orientation.z,
            userdata.door_detection_data_in_base_link.door_handle.pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the orientation of the door is looking towards the robot, we need the inverse
        pose_handle.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # orientation to look parallel to the door
        pose_handle.position.x = userdata.door_detection_data_in_base_link.door_handle.pose.position.x - 0.4  # to align the shoulder with the handle
        pose_handle.position.y = userdata.door_detection_data_in_base_link.door_handle.pose.position.y + 0.2  # refer to upper comment
        pose_handle.position.z = 0.0  # we don't need Z for moving
        userdata.door_handle_pose_goal = pose_handle
    else:  # open door; if it's open... just cross it?
        (r, p, theta) = euler_from_quaternion((
            userdata.door_detection_data_in_base_link.door_position.pose.orientation.x,
            userdata.door_detection_data_in_base_link.door_position.pose.orientation.y,
            userdata.door_detection_data_in_base_link.door_position.pose.orientation.z,
            userdata.door_detection_data_in_base_link.door_position.pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the orientation of the door is looking towards the robot, we need the inverse
        pose_handle.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # orientation to look parallel to the door
        pose_handle.position.x = userdata.door_detection_data_in_base_link.door_position.pose.position.x + 1.0  # enter the door
        pose_handle.position.y = userdata.door_detection_data_in_base_link.door_position.pose.position.y
        userdata.door_handle_pose_goal = pose_handle
    rospy.loginfo("Move base goal: \n" + str(pose_handle))
    return succeeded
def execute(self, userdata):
    userdata.orders = [DrinkOrder('david', 'coke'), DrinkOrder('michael', 'milk')]
    pose = Pose()
    pose.position = Point(0.059, 6.26, 0.0)
    pose.orientation = Quaternion(0, 0, -0.59, 0.8)
    userdata.guests_location = pose
    return succeeded
def get_grasp_pose(g_decision_making_state, object_grasp_pose, object_grasp_pose_set):
    # load pre-computed grasps
    loaded_file = numpy.load('../grasp_data/' + g_decision_making_state.object_name + '.npz')
    loaded_grasp = loaded_file['grasp'][0]
    # print len(loaded_grasp[g_decision_making_state.object_name])
    num_of_grasp = len(loaded_grasp[g_decision_making_state.object_name])
    temp = object_grasp_pose
    for i in range(0, num_of_grasp):
        print i
        print_angles(loaded_grasp[g_decision_making_state.object_name][i])
        print_angles(temp)
        object_grasp_pose_temp = temp
        print_angles(temp)
        object_grasp_pose_set[i] = Pose()
        object_grasp_pose_set[i].position = temp.position
        object_grasp_pose_set[i].orientation = rotate_orientation(
            object_grasp_pose_temp.orientation,
            loaded_grasp[g_decision_making_state.object_name][i].orientation)
        move_ok = plan_and_move_end_effector_to(
            object_grasp_pose, 0.3,
            g_bin_positions[g_decision_making_state.bin_id]['map_robot_position_2_ik_seed'])
        print_angles(object_grasp_pose_set[i])
        # raw_input("press Enter to continue...")
    return object_grasp_pose_set
def publisher(paths):
    publisher = rospy.Publisher('pathfinder', Path, queue_size=20)
    rospy.init_node('path_publisher', anonymous=True)
    rate = rospy.Rate(10)  # 10 hertz
    msgs = []  # list for all the messages we need to publish
    for path in paths:
        msg = Path()  # new path message
        msg.header = create_std_h()  # add a header to the path message
        for x, y, z in path:
            # create a pose for each position in the path;
            # position is the only field we care about, leave the rest as 0
            ps = PoseStamped()
            ps.header = create_std_h()
            i = Pose()
            i.position = Point(x, y, z)
            ps.pose = i
            msg.poses.append(ps)
        msgs.append(msg)
    while not rospy.is_shutdown():  # while we're going
        # publish each message
        for m in msgs:
            #rospy.loginfo(m)
            publisher.publish(m)
        rate.sleep()
def __init__(self):
    smach.StateMachine.__init__(self, [succeeded, preempted, aborted],
                                output_keys=['object_data'])
    with self:
        pose = Pose()
        pose.position = Point(0, 0, 0)
        # note: quaternion_from_euler expects radians, so the 45 here is
        # 45 rad, which was probably intended as 45 degrees
        pose.orientation = Quaternion(*quaternion_from_euler(0, 0, 45))
        smach.StateMachine.add(
            'N2_go_to_dep_policy',
            MoveActionState("/base_link", pose=pose),
            transitions={succeeded: 'look_for_objects'})

        def moped_enable_cb(userdata, response):
            if response.correct != None:
                rospy.loginfo("ENABLE_CLOSE_OBJECT_SEARCH response: " +
                              str(response.correct))
                return succeeded
            else:
                return aborted

        smach.StateMachine.add(
            'ENABLE_CLOSE_OBJECT_SEARCH',
            ServiceState('/iri_moped_handler/enable', enable,
                         response_cb=moped_enable_cb,
                         #request_key='object_name',
                         request=True),
            transitions={succeeded: 'look_for_objects'})

        # FIXME: why is there a smaller timeout & retry on aborted?
        # If something goes wrong I'd rather have it fail than having
        # it keep hanging on forever... --Siegfried
        smach.StateMachine.add(
            'look_for_objects',
            TopicReaderState(topic_name='/iri_moped_handler/outputOPL',
                             msg_type=ObjectPoseList,
                             timeout=10),
            remapping={'message': 'object_data'},
            transitions={succeeded: 'DISABLE_CLOSE_OBJECT_SEARCH',
                         aborted: 'look_for_objects'})

        def moped_disable_cb(userdata, response):
            if response.correct != None:
                rospy.loginfo("DISABLE_CLOSE_OBJECT_SEARCH response: " +
                              str(response.correct))
                return succeeded
            else:
                return aborted

        smach.StateMachine.add(
            'DISABLE_CLOSE_OBJECT_SEARCH',
            ServiceState('/iri_moped_handler/enable', enable,
                         response_cb=moped_disable_cb,
                         #request_key='object_name',
                         request=False),
            transitions={succeeded: succeeded})
def move_to_vision():
    # Set pose
    pose = Pose()
    pose.orientation = Quaternion(1.00, 0.0, 0.00, 0.00)
    pose.position = Point(0.712, 0.316, 0.250)
    # Request service
    request_pose(pose, "left", left_group)
def name():
    '''Create a ROS node and instantiate the class.'''
    rospy.init_node('whill_driver', anonymous=True)
    port = rospy.get_param('port', '/dev/ttyUSB0')
    TREAD = rospy.get_param('tread', 0.5)
    rospy.Subscriber("/cmd_vel", Twist, cmd_callback)
    x = y = th = 0.0
    motor_speed_old = Vector3()
    motor_speed_old.x = motor_speed_old.y = 0.0
    odom_pub = rospy.Publisher('/odom', Odometry, queue_size=1)
    joystick_pub = rospy.Publisher('/joystick', Vector3, queue_size=1)
    motor_pub = rospy.Publisher('/motor_speed', Vector3, queue_size=1)
    odom_broadcaster = tf.TransformBroadcaster()
    odom = Odometry()
    whill = WHILL(port)  # was hard-coded to '/dev/ttyUSB0'; use the param read above
    whill.StartSendingData(1, 100, 1)
    current_time = rospy.Time.now()
    last_time = rospy.Time.now()
    write_to_port_last_time = rospy.Time.now()
    rate = rospy.Rate(100)
    while not rospy.is_shutdown():
        current_time = rospy.Time.now()
        dt = (current_time - last_time).to_sec()
        joy_front, joy_side, battery_power, right_motor_speed, left_motor_speed, power_on, error = whill.GetData()
        #joy_front = joy_side = battery_power = right_motor_speed = left_motor_speed = power_on = error = 0

        # filter out motor speed jumps
        if abs(right_motor_speed - motor_speed_old.x) > 0.5:
            right_motor_speed = motor_speed_old.x
        motor_speed_old.x = right_motor_speed
        if abs(left_motor_speed - motor_speed_old.y) > 0.5:
            left_motor_speed = motor_speed_old.y
        motor_speed_old.y = left_motor_speed

        if joy_front != 0 or joy_side != 0:
            WHILL_JOYSTICK_ALLOWED = True
        else:
            WHILL_JOYSTICK_ALLOWED = False

        # compute odometry in a typical way given the velocities of the robot
        cv, cw, x, y, th = compute_odometry(x, y, th, right_motor_speed, left_motor_speed, dt)

        # since all odometry is 6DOF we'll need a quaternion created from yaw
        odom_quat = tf.transformations.quaternion_from_euler(0, 0, th)

        joystick_msg = Vector3()
        joystick_msg.x = joy_front
        joystick_msg.y = joy_side

        # first, we'll publish the transform over tf
        odom_broadcaster.sendTransform((x, y, 0.), odom_quat, current_time,
                                       "base_link", "odom")

        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "odom"
        # set the position
        odom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))
        # set the velocity
        odom.child_frame_id = "base_link"
        odom.twist.twist = Twist(Vector3(cv, 0, 0), Vector3(0, 0, cw))

        # set the motor speed
        motor_speed = Vector3()
        motor_speed.x = right_motor_speed
        motor_speed.y = left_motor_speed

        odom_pub.publish(odom)
        joystick_pub.publish(joystick_msg)
        motor_pub.publish(motor_speed)

        # Only write to the port every 20 ms
        write_to_port_time = (current_time - write_to_port_last_time).to_sec()
        if write_to_port_time >= 0.02:
            # dv: velocity in m/sec [-0.5:1.6]; dw: angular velocity in rad/sec [-1:1];
            # joystick commands [100:-100]
            # (dv and dw are presumably module-level globals set by cmd_callback)
            jv, jw = set_velocity_to_joystick_range(dv, dw)
            if WHILL_JOYSTICK_ALLOWED == True:
                whill.SetJoystick(1, 0, 0)
            else:
                whill.SetJoystick(0, jv, jw)
            write_to_port_last_time = current_time
            #whill.SetJoystick(0, int(dv*100), int(dw*-100))
        rospy.sleep(0.001)
        last_time = current_time
        rate.sleep()
plt.colorbar()
plt.xlabel('Horizontal rotation in degrees')
plt.ylabel('Vertical tilt in degrees')
plt.title('Face detection heatmap')
plt.savefig("/home/ubuntu/Desktop/catkin_ws/src/ae_testing/extra/face_detect_output/20")
plt.show()


if __name__ == '__main__':
    rospy.init_node('face_detect_test_node')
    rospy.Subscriber('/face_cords', face_cords, face_cords_callback)
    rospy.Subscriber('/images_processed', Int32, img_proc_callback)

    #lights = ['light1', 'light2', 'light3', 'light4', 'light5', 'light6']
    lights = ['light1', 'light2', 'light3']

    og_pose = Pose()
    og_pose.position.x = -0.09
    og_pose.position.y = 1.44
    og_pose.position.z = 1.15

    #light_pose = Pose()
    #light_pose.position.x = 0.07
    #light_pose.position.y = -1.54
    #light_pose.position.z = 0.2

    runTest(og_pose, lights)
    # once done, return to the original position:
    moveModel('human', og_pose.position, 0.0, 0.0, 0.0)
def poselist2pose(poselist):
    return Pose(Point(*poselist[0:3]), Quaternion(*poselist[3:7]))
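# A sketch of the inverse helper, flattening a Pose back into the same
# [x, y, z, qx, qy, qz, qw] list layout. This counterpart is assumed, not
# taken from the source.
def pose2poselist(pose):
    # Hypothetical inverse of poselist2pose.
    return [pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y,
            pose.orientation.z, pose.orientation.w]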
if acc_x_raw != 0.0:
    Pv_new = (Fv * Pv_old * Fv.T) + (Fu * Q * Fu.T)
    P_new = (Fx * P_old * Fx.T) + (Fv * Pv_new * Fu.T)
    Pose_cov[0] = P_new[0, 0]
    Pose_cov[1] = P_new[0, 1]
    Pose_cov[6] = P_new[1, 0]
    Pose_cov[7] = P_new[1, 1]
    Twist_cov[0] = Pv_new[0, 0]
    Twist_cov[1] = Pv_new[0, 1]
    Twist_cov[6] = Pv_new[1, 0]
    Twist_cov[7] = Pv_new[1, 1]

    odom = Odometry()
    odom.header.stamp = current_time
    odom.header.frame_id = "odom"
    odom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))
    odom.pose.covariance = Pose_cov
    odom.child_frame_id = "base_link"
    odom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))
    odom.twist.covariance = Twist_cov

    odom_pub.publish(odom)
    Pv_old = Pv_new
    P_old = P_new
###################################################################
r.sleep()
#!/usr/bin/env python
import rospy
from ar_track_alvar_msgs.msg import AlvarMarkers, AlvarMarker
from geometry_msgs.msg import PoseStamped, Pose
#from visualization_msgs.msg import Marker

pose_msg = Pose()
count = 1


def listener():
    rospy.init_node('block_pose')
    print "Node for block pose"
    while not rospy.is_shutdown():
        # note: this re-subscribes on every loop iteration; subscribing once
        # before the loop would be the usual pattern
        x = rospy.Subscriber('/ar_pose_marker', AlvarMarker, callback)
        #print "success"
        #publisher()
        #print count
        global count
        count = count + 1
        print x
    #rospy.Subscriber("/ar_pose_marker", av_msg, callback)
    #av_msg = AlvarMarker()
    #tag_id = av_msg.id


def callback(data):
def __init__(self):
    # Retrieve params:
    self._table_object_name = rospy.get_param('~table_object_name', 'table')
    self._grasp_object_name = rospy.get_param('~grasp_object_name', 'coke_can')
    self._grasp_object_width = rospy.get_param('~grasp_object_width', 0.01)
    self._arm_group = rospy.get_param('~arm_1', 'arm_1')
    #self._gripper_group = rospy.get_param('~gripper', 'gripper')
    self._approach_retreat_desired_dist = rospy.get_param('~approach_retreat_desired_dist', 0.3)
    self._approach_retreat_min_dist = rospy.get_param('~approach_retreat_min_dist', 0.1)

    # Create (debugging) publishers:
    self._grasps_pub = rospy.Publisher('grasps', PoseArray, queue_size=1, latch=True)
    self._places_pub = rospy.Publisher('places', PoseArray, queue_size=1, latch=True)

    # Create planning scene and robot commander:
    self._scene = PlanningSceneInterface()
    self._robot = RobotCommander()
    rospy.sleep(1.0)

    # Clean the scene:
    self._scene.remove_world_object(self._table_object_name)
    self._scene.remove_world_object(self._grasp_object_name)

    # Add table and Coke can objects to the planning scene:
    self._pose_table = self._add_table(self._table_object_name)
    self._pose_coke_can = self._add_coke_can(self._grasp_object_name)
    rospy.sleep(1.0)

    # -----------------------------------------------------------
    # Define target place pose:
    self._pose_place = Pose()
    self._pose_place.position.x = self._pose_coke_can.position.x + 0.05
    self._pose_place.position.y = self._pose_coke_can.position.y
    self._pose_place.position.z = self._pose_coke_can.position.z - 0.1
    self._pose_place.orientation = Quaternion(*quaternion_from_euler(0.0, 0.0, 0.0))
    rospy.loginfo('x = %f, y = %f, z = %f',
                  self._pose_place.position.x,
                  self._pose_place.position.y,
                  self._pose_place.position.z)

    # Retrieve groups (arm and gripper):
    self._arm = self._robot.get_group(self._arm_group)
    #self._gripper = self._robot.get_group(self._gripper_group)

    # Create grasp generator 'generate' action client:
    self._grasps_ac = SimpleActionClient('/moveit_simple_grasps_server/generate',
                                         GenerateGraspsAction)
    if not self._grasps_ac.wait_for_server(rospy.Duration(5.0)):
        rospy.logerr('Grasp generator action client not available!')
        rospy.signal_shutdown('Grasp generator action client not available!')
        return

    # Create move group 'pickup' action client:
    self._pickup_ac = SimpleActionClient('/pickup', PickupAction)
    if not self._pickup_ac.wait_for_server(rospy.Duration(5.0)):
        rospy.logerr('Pick up action client not available!')
        rospy.signal_shutdown('Pick up action client not available!')
        return

    # Create move group 'place' action client:
    self._place_ac = SimpleActionClient('/place', PlaceAction)
    if not self._place_ac.wait_for_server(rospy.Duration(5.0)):
        rospy.logerr('Place action client not available!')
        rospy.signal_shutdown('Place action client not available!')
        return

    # Pick Coke can object:
    while not self._pickup(self._arm_group, self._grasp_object_name, self._grasp_object_width):
        rospy.logwarn('Pick up failed! Retrying ...')
        rospy.sleep(1.0)
    rospy.loginfo('Picked up successfully')

    # Place Coke can object on another spot on the support surface (table):
    while not self._place(self._arm_group, self._grasp_object_name, self._pose_place):
        rospy.logwarn('Place failed! Retrying ...')
        rospy.sleep(1.0)
    rospy.loginfo('Placed successfully')
def ik_service_client(limb="right", use_advanced_options=False,
                      position_xyz=[0, 0, 0], orientation_xyzw=[0, 0, 0, 0],
                      seed_position=[0.7, 0.4, -1.7, 1.4, -1.1, -1.6, -0.4]):
    """
    :param limb: "right" by default
    :param use_advanced_options: False by default
    :param position_xyz: cartesian XYZ, [0, 0, 0] by default
    :param orientation_xyzw: quaternion identifying the rotation of the end effector
    :param seed_position: where to start the IK optimization
    :return: ret object with fields result (bool) and angle (list)
    """
    # return value class
    class Ret:
        def __init__(self):
            self.result = False
            self.angle = []

    # initialize an instance of the return value
    ret = Ret()

    # call IK service
    ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
    ikreq = SolvePositionIKRequest()
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')

    # target pose
    poses = {
        'right': PoseStamped(
            header=hdr,
            pose=Pose(
                position=Point(
                    x=position_xyz[0],
                    y=position_xyz[1],
                    z=position_xyz[2],
                ),
                orientation=Quaternion(
                    x=orientation_xyzw[0],
                    y=orientation_xyzw[1],
                    z=orientation_xyzw[2],
                    w=orientation_xyzw[3],
                ),
            ),
        ),
    }
    # Add desired pose for inverse kinematics
    ikreq.pose_stamp.append(poses[limb])
    # Request inverse kinematics from base to "right_hand" link
    ikreq.tip_names.append('right_hand')

    if use_advanced_options:
        # Optional Advanced IK parameters
        rospy.loginfo("Running Advanced IK Service Client example.")
        # The joint seed is where the IK position solver starts its optimization
        ikreq.seed_mode = ikreq.SEED_USER
        seed = JointState()
        seed.name = ['right_j0', 'right_j1', 'right_j2', 'right_j3',
                     'right_j4', 'right_j5', 'right_j6']
        seed.position = seed_position
        ikreq.seed_angles.append(seed)

        # Once the primary IK task is solved, the solver will then try to bias
        # the joint angles toward the goal joint configuration. The null space
        # is the extra degrees of freedom the joints can move without affecting
        # the primary IK task.
        ikreq.use_nullspace_goal.append(True)
        # The nullspace goal can either be the full set or a subset of joint angles
        goal = JointState()
        goal.name = ['right_j1', 'right_j2', 'right_j3']
        goal.position = [1, -1, 2]
        ikreq.nullspace_goal.append(goal)
        # The gain used to bias toward the nullspace goal. Must be [0.0, 1.0].
        # If empty, the default gain of 0.4 will be used.
        ikreq.nullspace_gain.append(0.4)
    else:
        rospy.loginfo("Running Simple IK Service Client example.")

    try:
        rospy.wait_for_service(ns, 5.0)
        resp = iksvc(ikreq)
    except (rospy.ServiceException, rospy.ROSException), e:
        rospy.logerr("Service call failed: %s" % (e,))
    return ret
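# A hypothetical call illustrating how this client might be used. The pose
# values are made up for the example; the quaternion [0, 1, 0, 0] points the
# gripper straight down in the base frame.
result = ik_service_client(limb="right",
                           use_advanced_options=False,
                           position_xyz=[0.45, 0.1, 0.2],
                           orientation_xyzw=[0.0, 1.0, 0.0, 0.0])
if result.result:
    print(result.angle)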
def circleObject(self, object_string, revs=1.0, look_dock=True, clockwise=True, thresh=0):
    '''
    object_string (string): "dock", "any_dock", "scan_buoy", "explore"
    '''
    self.logDock("Circling the " + object_string + ".")
    radius = 0

    # Assign these variables to circle either the dock or the scan buoy
    object_pos = []
    identify_function = self.dumbFunction
    dir = 1.0
    if not clockwise:
        dir = -1.0
    nav_msg_type = 0

    # Choose whether to look at the dock while circling or just boat around
    if look_dock == True:
        nav_msg_type = Waypoint.NAV_STATION
    else:
        nav_msg_type = Waypoint.NAV_WAYPOINT

    if object_string == "dock" or object_string == "any_dock":
        object_pos, _ = self.getDockPose()  # Dock centroid
        radius = self.dock_radius
    elif object_string == "scan_buoy":
        object_pos, _ = self.getScanBuoyPose()  # Scan buoy centroid
        radius = self.scan_radius
    elif object_string == "explore":  # Explore!
        object_pos, _ = self.getExplorePose()
        radius = self.explore_radius

    '''if (object_string == "dock"):
        identify_function = self.checkPlacard
    else:
        identify_function = self.dumbFunction'''

    odom_msg = rospy.wait_for_message("/wamv/odom", Odometry)  # Current odom
    init_vec = [
        object_pos[0] - odom_msg.pose.pose.position.x,  # Boat-to-object vector
        object_pos[1] - odom_msg.pose.pose.position.y
    ]
    init_dist = np.sqrt(init_vec[0]**2 + init_vec[1]**2)  # Boat-to-object distance

    init_pose = Pose()  # First position, at the given radius from the object
    init_pose.position.x = object_pos[0] + float(radius) * (-init_vec[0] / init_dist)
    init_pose.position.y = object_pos[1] + float(radius) * (-init_vec[1] / init_dist)

    init_angle = math.atan2(init_vec[1], init_vec[0])  # Set angle to face the object
    if clockwise:
        init_angle = init_angle + 1.5707
    else:
        init_angle = init_angle - 1.5707
    self.setPoseQuat(init_pose, init_angle)

    circle_wp_route = WaypointRoute()  # Waypoint list
    circle_wps = []
    #init_wp = self.makeWaypoint(init_pose, nav_type=nav_msg_type)  # Create waypoint from above data
    #circle_wps.append(init_wp)

    rev_angle = revs * 2 * np.pi
    n_wps = int(np.ceil(revs * self.n_circle_wps))
    for i in range(n_wps):
        wp_pose = Pose()  # Set the circling waypoint pose
        rot = dir * (i + 1) * (rev_angle / n_wps)
        x = init_pose.position.x  # Rotate the object-viewing position
        y = init_pose.position.y
        ox = object_pos[0]
        oy = object_pos[1]
        # Rotation (essentially a 2x2 matrix)
        wp_pose.position.x = ox + math.cos(rot) * (x - ox) + math.sin(rot) * (y - oy)
        wp_pose.position.y = oy + -math.sin(rot) * (x - ox) + math.cos(rot) * (y - oy)
        # Work out the angle to the object
        wp_vec = [object_pos[0] - wp_pose.position.x,
                  object_pos[1] - wp_pose.position.y]
        wp_angle = math.atan2(wp_vec[1], wp_vec[0])
        if clockwise:
            wp_angle = wp_angle + 1.5707
        else:
            wp_angle = wp_angle - 1.5707
        self.setPoseQuat(init_pose, init_angle)
        self.setPoseQuat(wp_pose, wp_angle)
        if i == n_wps - 1:  # Always make the last waypoint a station
            spin_wp = self.makeWaypoint(wp_pose, nav_type=Waypoint.NAV_STATION, duration=5.0)
        else:
            spin_wp = self.makeWaypoint(wp_pose, nav_type=nav_msg_type)  # Set waypoint
        self.publishMarker(wp_pose, id=i)
        circle_wps.append(spin_wp)

    circle_wp_route.waypoints = circle_wps
    circle_wp_route.speed = self.general_speed
    self.route_pub.publish(circle_wp_route)  # Start on the route

    #r = rospy.Rate(2)  # Wait until the objective is identified
    #while not identify_function() and not rospy.is_shutdown:
    #    r.sleep()

    if thresh == 0:
        self.logDock("Waiting for wp request")
        self.waitForWaypointRequest()
        return
    else:
        conf = 0
        while conf < thresh:
            # TODO: make a function to find the object based on frame_id
            dock = self.findClosest(self.unused_objects, type="dock")
            if len(dock.confidences) != 0:
                conf = dock.confidences[0]
import rospy
from jsk_recognition_msgs.msg import TorusArray, Torus
from geometry_msgs.msg import Pose
import math

rospy.init_node("test_torus")

p = rospy.Publisher("test_torus", TorusArray)
r = rospy.Rate(5)
counter = 0
while not rospy.is_shutdown():
    torus_array = TorusArray()

    torus1 = Torus()
    torus1.header.frame_id = "base_link"
    torus1.large_radius = 4
    torus1.small_radius = 1
    p1 = Pose()
    p1.position.x = 2
    p1.position.z = 4
    p1.orientation.x = math.sqrt(0.5)
    p1.orientation.y = math.sqrt(0.5)
    torus1.pose = p1

    torus2 = Torus()
    torus2.header.frame_id = "base_link"
    torus2.large_radius = 5
    torus2.small_radius = 1
    p2 = Pose()
    p2.position.x = 3
    p2.position.y = -4
    p2.orientation.z = math.sqrt(0.5)
    p2.orientation.y = math.sqrt(0.5)
def __init__(self):
    # initialize
    rospy.init_node('MoveBaseSquare', anonymous=False)

    # tell the user how to stop the robot
    rospy.loginfo("To stop the robot: CTRL + C")

    # What function to call when you ctrl + c
    rospy.on_shutdown(self.shutdown)

    # How big is the square we want the robot to navigate?
    square_size = rospy.get_param("~square_size", 3.0)  # meters

    # Create a list to hold the target quaternions (orientations)
    quaternions = list()

    # First define the corner orientations as Euler angles
    euler_angles = (pi / 2, pi, 3 * pi / 2, 0)

    # Then convert the angles to quaternions
    for angle in euler_angles:
        q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
        q = Quaternion(*q_angle)
        quaternions.append(q)  # append the new element to the end of the list

    # Create a list to hold the waypoint poses
    waypoints = list()

    # Append each of the four waypoints to the list. Each waypoint
    # is a pose consisting of a position and orientation in the map frame.
    waypoints.append(Pose(Point(square_size, 0.0, 0.0), quaternions[0]))
    waypoints.append(Pose(Point(square_size, square_size, 0.0), quaternions[1]))
    waypoints.append(Pose(Point(0.0, square_size, 0.0), quaternions[2]))
    waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))

    # Initialize the visualization markers for RViz
    self.init_markers()

    # Set a visualization marker at each waypoint
    for waypoint in waypoints:
        p = Point()
        p = waypoint.position
        self.markers.points.append(p)

    # Publisher to manually control the robot (e.g. to stop it)
    #self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
    self.cmd_vel_pub = rospy.Publisher('navigation_velocity_smoother/raw_cmd_vel',
                                       Twist, queue_size=5)

    # Subscribe to the move_base action server
    self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    rospy.loginfo("Waiting for move_base action server...")

    # Wait 10 seconds for the action server to become available
    self.move_base.wait_for_server(rospy.Duration(10))
    rospy.loginfo("Connected to move base server")
    rospy.loginfo("Starting navigation test")

    # Initialize a counter to track waypoints
    i = 0

    # Cycle through the four waypoints
    while i < 4 and not rospy.is_shutdown():
        # Update the marker display
        self.marker_pub.publish(self.markers)

        # Initialize the waypoint goal
        goal = MoveBaseGoal()

        # Use the map frame to define goal poses
        goal.target_pose.header.frame_id = 'map'

        # Set the time stamp to "now"
        goal.target_pose.header.stamp = rospy.Time.now()

        # Set the goal pose to the i-th waypoint
        goal.target_pose.pose = waypoints[i]

        # Start the robot moving toward the goal
        self.move(goal)
        i += 1
# rospy.init_node('service_server')
# self.my_service = rospy.Service('/get_pose_service', Empty, self.service_callback)  # create the Service called get_pose_service with the defined callback
# self.sub_pose = rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.sub_callback)
# rospy.spin()  # maintain the service open
#
# def service_callback(self, request):
#     print "Robot Pose:"
#     print self.robot_pose
#     return EmptyResponse()  # the service Response class, in this case EmptyResponse
#
# def sub_callback(self, msg):
#     self.robot_pose = msg.pose.pose
#
# PoseService()

robot_pose = Pose()


def service_callback(request):
    print "Robot Pose:"
    print robot_pose
    return EmptyResponse()  # the service Response class, in this case EmptyResponse


def sub_callback(msg):
    global robot_pose
    robot_pose = msg.pose.pose


rospy.init_node('service_server')
def create_path(self):
    """Build a list of waypoints."""
    move_group = self.move_group

    # --- Getting pose message
    wpose = Pose()
    waypoints = []

    # --- Adding points to follow in path
    print(cblue + "\n\t === Acquire a trajectory === \n" + cend)
    print "Current directory: ", dir
    print "Files found: \n"
    print '\n'.join(files)

    readline.set_completer(completer)  # activate autocompletion on filenames
    readline.parse_and_bind("tab: complete")
    filename = raw_input(cyellow + "\nGetting path from file: " + cend)
    filepath = "Trajectories/" + filename

    # -- Test to check file availability
    try:
        way = open(filepath, 'r')
    except IOError:  # in case of a misspelled filename
        print(cred + "Looks like the file doesn't exist" + cend)
        return None, None

    os.system('clear')
    print(cgreen + "\n\t > - - Niryo One - Surmoul 3D - - < \n" + cend)
    print("\n-> file : " + '\033[4m' + filename + cend + " - Modified : " +
          time.ctime(os.path.getmtime(filepath)))

    line = way.readline()
    nbr = 1
    # -- Counting lines and looking for errors
    while line:
        nbr += 1
        line = way.readline()
        data = len(line.split(' '))
        if (data != 7 and data != 8) and line != "":
            print(cred + "Error: line " + str(nbr) +
                  " does not contain 3 position + 4 quaternion values:" + cend)
            print "contains %d values" % (data)
            print line
            return None, None
    way.seek(0)

    debug = False
    if filename == 'interactive.txt':
        debug = True
        nbr = int(raw_input('Nbr of lines to read : '))

    traveling_distance = 0  # to accumulate the travelled distance
    bar = Bar('Processing waypoints', max=nbr - 1, width=10)
    i = 0
    distance_btwn_points = []
    prev_x = 0
    prev_y = 0
    prev_z = 0
    while i <= nbr - 2:
        i += 1
        bar.next()
        tab = way.readline().split(' ')
        q1 = [float(tab[3]), float(tab[4]), float(tab[5]), float(tab[6])]
        wpose.orientation.w = q1[0]  # - Quaternion
        wpose.orientation.x = q1[1]
        wpose.orientation.y = q1[2]
        wpose.orientation.z = q1[3]
        wpose.position.x = float(tab[0]) + offsets_niryo[0]  # - Position
        wpose.position.y = float(tab[1]) + offsets_niryo[1]
        wpose.position.z = float(tab[2]) + offsets_niryo[2]
        distance_btwn_points.append(sqrt(pow(wpose.position.x - prev_x, 2) +
                                         pow(wpose.position.y - prev_y, 2) +
                                         pow(wpose.position.z - prev_z, 2)))
        if len(tab) == 8:  # if gcode
            if not int(tab[7]) and distance_btwn_points[-1] != -2:  # if extrusion is 0 then retraction
                distance_btwn_points[-1] = -2  # overwrite the last point with a retraction, in mm
            else:
                traveling_distance += distance_btwn_points[-1]
        prev_x = wpose.position.x
        prev_y = wpose.position.y
        prev_z = wpose.position.z
        waypoints.append(copy.deepcopy(wpose))

        # -- Outing trajectory ---------------
        if divmod(i, 5990)[1] == 0 or i == nbr - 1:
            # the trajectory is split into chunks of 5990 points; the robot goes
            # to a specific pose where it can wait, then executes the next part
            wpose.position.x += -0.07
            wpose.position.z += 0.05
            wpose.orientation.x = 0  # - Quaternion
            wpose.orientation.y = 0.259
            wpose.orientation.z = 0
            wpose.orientation.w = 0.966
            waypoints.append(copy.deepcopy(wpose))
            distance_btwn_points.append(0)
    way.close()
    bar.finish()

    for i in range(0, len(distance_btwn_points)):
        if divmod(i, 5990)[1] == 0:
            distance_btwn_points[i] = 0.005  # 5 mm to empty and fill the nozzle

    error_way = 0
    is_error = True
    while is_error:
        temp_way = copy.deepcopy(waypoints)
        is_error = False
        for i in range(0, len(waypoints) - 1):  # - delete duplicated points in the path
            if waypoints[i] == waypoints[i + 1]:
                del temp_way[i - error_way]
                error_way += 1
                is_error = True
                #print "line %d" % (i)  # - debug
        waypoints = copy.deepcopy(temp_way)
    print "-Duplicated points on path- errors fixed : %d" % (error_way)
    traveling_distance = round(traveling_distance * 0.209, 2)  # filament diameter 1.75 mm / nozzle 0.8 mm
    a, b = divmod(traveling_distance, 1000)
    print "Trajectory points found: %d" % (len(waypoints) - 1)
    print "Approx. necessary filament: %2dm %3dmm" % (a, b * 100)
    return waypoints, distance_btwn_points
def add_order_bin(x, y):
    # Necessary parameters
    bin_width, bin_depth, bin_height = 0.65, 0.4, 0.43
    interior_width, interior_depth, interior_height = 0.45, 0.29, 0.18
    wall_width, wall_depth = (bin_width - interior_width) / 2, (bin_depth - interior_depth) / 2
    base_height = bin_height - interior_height

    objects = []
    poses = []

    # Create bottom
    objects.append(SolidPrimitive(
        type=SolidPrimitive.BOX,
        dimensions=[bin_depth, bin_width, base_height],
    ))
    poses.append(Pose(
        position=Point(x=x, y=y, z=base_height / 2),
        orientation=Quaternion(x=0, y=0, z=0, w=1),
    ))

    # Create left side
    objects.append(SolidPrimitive(
        type=SolidPrimitive.BOX,
        dimensions=[bin_depth, wall_width, interior_height],
    ))
    poses.append(Pose(
        position=Point(x=x,
                       y=y - interior_width / 2 - wall_width / 2,
                       z=base_height + interior_height / 2),
        orientation=Quaternion(x=0, y=0, z=0, w=1),
    ))

    # Create right side
    objects.append(SolidPrimitive(
        type=SolidPrimitive.BOX,
        dimensions=[bin_depth, wall_width, interior_height],
    ))
    poses.append(Pose(
        position=Point(x=x,
                       y=y + interior_width / 2 + wall_width / 2,
                       z=base_height + interior_height / 2),
        orientation=Quaternion(x=0, y=0, z=0, w=1),
    ))

    # Create near side
    objects.append(SolidPrimitive(
        type=SolidPrimitive.BOX,
        dimensions=[wall_depth, interior_width, interior_height],
    ))
    poses.append(Pose(
        position=Point(x=x - interior_depth / 2 - wall_depth / 2,
                       y=y,
                       z=base_height + interior_height / 2),
        orientation=Quaternion(x=0, y=0, z=0, w=1),
    ))

    # Create far side
    objects.append(SolidPrimitive(
        type=SolidPrimitive.BOX,
        dimensions=[wall_depth, interior_width, interior_height],
    ))
    poses.append(Pose(
        position=Point(x=x + interior_depth / 2 + wall_depth / 2,
                       y=y,
                       z=base_height + interior_height / 2),
        orientation=Quaternion(x=0, y=0, z=0, w=1),
    ))

    co = CollisionObject()
    co.operation = CollisionObject.ADD
    co.id = "order_bin"
    co.header.frame_id = "/base_link"
    co.header.stamp = rospy.Time.now()
    co.primitives = objects
    co.primitive_poses = poses
    scene._pub_co.publish(co)
def __init__(self):
    rospy.init_node('nav_test', anonymous=True)
    rospy.on_shutdown(self.shutdown)

    # How long in seconds should the robot pause at each location?
    self.rest_time = rospy.get_param("~rest_time", 2)

    # Are we running in the fake simulator?
    self.fake_test = rospy.get_param("~fake_test", False)

    # Goal state return values
    goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED', 'SUCCEEDED', 'ABORTED',
                   'REJECTED', 'PREEMPTING', 'RECALLING', 'RECALLED', 'LOST']

    # Define the locations (the Chinese keys are the place names used by
    # the ASR/TTS: starting point, bedroom, living room)
    locations = dict()
    locations['起始点'] = Pose(Point(1.081, -0.016, 0.000),
                            Quaternion(0.000, 0.000, 0.000, 1.000))  # starting point
    #locations['厨房'] = Pose(Point(3.233, 1.638, 0.000), Quaternion(0.000, 0.000, 0.000, 1.000))  # kitchen
    locations['卧室'] = Pose(Point(3.294, 0.689, 0.000),
                           Quaternion(0.000, 0.000, 0.000, 1.000))  # bedroom
    locations['客厅'] = Pose(Point(6.689, 0.570, 0.000),
                           Quaternion(0.000, 0.000, 0.000, 1.000))  # living room

    self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)

    # Subscribe to the move_base action server
    self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    rospy.loginfo("Waiting for move_base action server...")

    # Wait 60 seconds for the action server to become available
    self.move_base.wait_for_server(rospy.Duration(60))
    rospy.loginfo("Connected to move base server")

    # A variable to hold the initial pose of the robot to be set by
    # the user in RViz
    initial_pose = PoseWithCovarianceStamped()

    # Variables to keep track of success rate, running time,
    # and distance traveled
    n_loop = 0  # loop counter
    n_locations = len(locations)
    n_goals = 0
    n_successes = 0
    i = n_locations
    distance_traveled = 0
    start_time = rospy.Time.now()
    running_time = 0
    location = ""
    last_location = ""
    self.Dist = 0
    self.Asr = ""
    Loc_1 = 0
    Loc_2 = 0
    Name_1 = 0
    Name_2 = 0
    Name = ""
    Drink_1 = 0
    Drink_2 = 0
    Drink_3 = 0
    Drink = ""

    # Get the initial pose from the user (how to set)
    rospy.loginfo("*** Click the 2D Pose Estimate button in RViz to set the robot's initial pose...")
    rospy.wait_for_message('initialpose', PoseWithCovarianceStamped)
    self.last_location = Pose()
    rospy.Subscriber('initialpose', PoseWithCovarianceStamped, self.update_initial_pose)

    # Make sure we have the initial pose
    while initial_pose.header.stamp == "":
        rospy.sleep(1)

    rospy.loginfo("Starting navigation test")

    #dict_keys = ['起始点', '厨房', '卧室']
    dict_keys = ['起始点', '卧室']
    pub1 = rospy.Publisher('/voice/xf_tts_topic', String, queue_size=5)  # TTS
    pub2 = rospy.Publisher('/voice/xf_asr_topic', Int32, queue_size=5)  # ASR
    #pub3 = rospy.Publisher('arm', String, queue_size=5)  # arm
    #pub4 = rospy.Publisher('tracking', Int32, queue_size=5)  # object recognition
    pub5 = rospy.Publisher('/following', Int32, queue_size=5)  # person following
    #pub6 = rospy.Publisher('drink_type', String, queue_size=5)  # type of drink to fetch
    pub7 = rospy.Publisher('ros2_wake', Int32, queue_size=5)  # ros2opencv2
    #pub8 = rospy.Publisher('/voice/xf_nav_follow', String, queue_size=5)  # nav_follow

    #######################################
    # Begin the main loop and run through a sequence of locations
    while n_loop < 2:
        n_loop += 1
        if i == n_locations:
            i = 0
            # sequence = sample(locations, n_locations)
            # Skip over the first location if it is the same as
            # the last location
            #if dict_keys[0] == last_location:
            #    i = 1

        #####################################
        ####### Modified by XF 2017.4.14 ######
        # location = sequence[i]
        # rospy.loginfo("location= " + str(location))
        # location = locations.keys()[i]
        location = dict_keys[i]
        #####################################

        # Keep track of the distance traveled.
        # Use the updated initial pose if available.
if initial_pose.header.stamp == "": distance = sqrt( pow( locations[location].position.x - locations[last_location].position.x, 2) + pow( locations[location].position.y - locations[last_location].position.y, 2)) else: rospy.loginfo("Updating current pose.") distance = sqrt( pow( locations[location].position.x - initial_pose.pose.pose.position.x, 2) + pow( locations[location].position.y - initial_pose.pose.pose.position.y, 2)) initial_pose.header.stamp = "" # Store the last location for distance calculations last_location = location rospy.loginfo("test 1 location=" + str(last_location)) # Increment the counters i += 1 rospy.loginfo("test 2 i=" + str(i)) n_goals += 1 # Set up the next goal location self.goal = MoveBaseGoal() self.goal.target_pose.pose = locations[location] self.goal.target_pose.header.frame_id = 'map' self.goal.target_pose.header.stamp = rospy.Time.now() # Let the user know where the robot is going next rospy.loginfo("Going to: " + str(location)) # Start the robot toward the next location self.move_base.send_goal(self.goal) # Allow 3 minutes to get there finished_within_time = self.move_base.wait_for_result( rospy.Duration(180)) # Check for success or failure if not finished_within_time: self.move_base.cancel_goal() rospy.loginfo("Timed out achieving goal") else: state = self.move_base.get_state() if state == GoalStatus.ABORTED: # can not find a plan,give feedback rospy.loginfo("please get out") status_n = "please get out" pub1.publish(status_n) if state == GoalStatus.SUCCEEDED: rospy.loginfo("Goal succeeded!") n_successes += 1 distance_traveled += distance rospy.loginfo("State:" + str(state)) ############ add voice by XF 4.25 ################## The 1st # pub1 = rospy.Publisher('/voice/xf_tts_topic', String, queue_size=5) loc = "主人,我已到" + str(location) pub1.publish(loc) #tts rospy.sleep(5) # rospy.Subscriber('dist',Int32,self.callbackDist) #now robot already adjust correctly # rospy.loginfo("Dist:"+str(self.Dist)) if str(location) == '起始点': commandA1 = "您好,请问有什么指示?" pub1.publish(commandA1) rospy.sleep(3) ############# now next asr ###################### awake = 1 pub2.publish(awake) #asr rospy.Subscriber('/voice/xf_asr_follow', String, self.callbackAsr) ###### Modify by XF 2017. 4.21 ######### rospy.sleep(10) rospy.loginfo("self.Asr:" + str(self.Asr)) Loc_1 = string.find( self.Asr, '卧室') #find函数,返回所查字符串在整个字符串的起始位置,如果没有,则返回-1 Loc_2 = string.find(self.Asr, '客厅') Name_1 = string.find(self.Asr, '郭晓萍') Name_2 = string.find(self.Asr, '方璐') # Drink_1=string.find(self.Asr,'可乐') # Drink_2=string.find(self.Asr,'雪碧') # Drink_3=string.find(self.Asr,'橙汁') if Loc_1 != -1: rospy.loginfo("匹配卧室") #dict_keys = ['起始点','厨房','卧室'] dict_keys = ['起始点', '卧室'] if Loc_2 != -1: rospy.loginfo("匹配客厅") #dict_keys = ['起始点','厨房','客厅'] dict_keys = ['起始点', '客厅'] if Name_1 != -1: Name = '郭晓萍' rospy.loginfo("匹配郭晓萍") if Name_2 != -1: Name = '方璐' rospy.loginfo("匹配方璐") # if Drink_1!=-1: # Drink = '可乐' # if Drink_2!=-1: # Drink = '雪碧' # if Drink_3!=-1: # Drink = '橙汁' # ros2_wake=1 # pub7.publish(ros2_wake) # pub6.publish(Drink) ####################################### rospy.sleep(5) commandA2 = "收到指示,请稍等." 
pub1.publish(commandA2) ################################################## The 2nd #if str(location) == '厨房': ############## add by XF 5.27 ################## # rospy.sleep(5) # command="start" # pub3.publish(command) #arm # track=1 # pub4.publish(track) #物体识别 # rospy.sleep(10) # rospy.Subscriber('dist',Int32,self.callbackDist) #now robot already adjust correctly # rospy.loginfo("Dist2:"+str(self.Dist)) # rospy.loginfo("go away Dist:"+str(self.Dist/1000.0-0.3)) # self.goal = MoveBaseGoal() # self.goal.target_pose.pose.position.x = self.Dist/1000.0-0.3 # self.goal.target_pose.pose.orientation.w = 1.0; # self.goal.target_pose.header.frame_id = 'base_footprint' # self.goal.target_pose.header.stamp = rospy.Time.now() # self.move_base.send_goal(self.goal) # rospy.sleep(5) ################################################ # commandB1="pick" # pub3.publish(commandB1) # rospy.sleep(10) # commandB2="我已发现"+str(Drink)+"并抓取." # pub1.publish(commandB2) ################################################## The 3rd if str(location) == '卧室': # rospy.sleep(5) # follow=1 # pub5.publish(follow) # track=2 #stop tracking # pub4.publish(track) # rospy.loginfo("send stop tracking command") #subscribe the angle # rospy.Subscriber('xf_xfm_node', Int32, self.callbackXFM) rospy.sleep(10) # commandC2=str(Name)+"给您"+str(Drink) commandC2 = str(Name) + "请问有什么指示" pub1.publish(commandC2) awake = 1 pub2.publish(awake) #asr rospy.sleep(5) # nav_follow="follow" #nav_follow # pub8.publish(nav_follow) follow = 1 pub5.publish(follow) rospy.sleep(5) commandC1 = "跟随已启动" pub1.publish(commandC1) # rospy.sleep(50) ################################################## if str(location) == '客厅': # rospy.sleep(5) # follow=1 # pub5.publish(follow) # track=2 #stop tracking # pub4.publish(track) # rospy.loginfo("send stop tracking command") #subscribe the angle # rospy.Subscriber('xf_xfm_node', Int32, self.callbackXFM) rospy.sleep(2) # commandC2=str(Name)+"给您"+str(Drink) commandC2 = str(Name) + "请问有什么指示" pub1.publish(commandC2) awake = 1 pub2.publish(awake) #asr rospy.sleep(5) # nav_follow="follow" #nav_follow # pub8.publish(nav_follow) follow = 1 pub5.publish(follow) rospy.sleep(5) commandC1 = "跟随已启动" pub1.publish(commandC1) # rospy.sleep(50) else: rospy.loginfo("Goal failed with error code: " + str(goal_states[state])) # How long have we been running? running_time = rospy.Time.now() - start_time running_time = running_time.secs / 60.0 # Print a summary success/failure, distance traveled and time elapsed rospy.loginfo("Success so far: " + str(n_successes) + "/" + str(n_goals) + " = " + str(100 * n_successes / n_goals) + "%") rospy.loginfo("Running time: " + str(trunc(running_time, 1)) + " min Distance: " + str(trunc(distance_traveled, 1)) + " m") rospy.sleep(self.rest_time)
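# The navigation loop above depends on two callbacks that are subscribed to
# but not shown in this excerpt. A minimal hypothetical sketch of what they
# presumably look like, reconstructed from how self.Asr and the initial pose
# are used above:
def callbackAsr(self, msg):
    # cache the latest speech-recognition result for the keyword matching above
    self.Asr = msg.data

def update_initial_pose(self, initial_pose):
    # cache the pose set via RViz's "2D Pose Estimate" button
    self.initial_pose = initial_pose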
def navigation_handler(self, data):
    """ Rebroadcasts navigation data in the following formats:
    1) /odom => /base_footprint transform (if enabled, as per REP 105)
    2) Odometry message, with parent/child IDs as in #1
    3) NavSatFix message, for systems which are knowledgeable about GPS stuff
    4) IMU messages
    """
    rospy.logdebug("Navigation received")
    # If we don't have a fix, don't publish anything...
    if self.nav_status.status == NavSatStatus.STATUS_NO_FIX:
        return

    # Convert from the Applanix's NED convention to ROS's ENU convention.
    # Roll is still roll, since it's WRT the x axis of the vehicle.
    # Pitch is negated since the axis goes the other way (+y to right vs left).
    # Yaw (or heading) in Applanix is clockwise starting with North;
    # in ROS it's counterclockwise starting with East.
    orient = PyKDL.Rotation.RPY(RAD(data.roll), RAD(-data.pitch),
                                RAD(90 - data.heading)).GetQuaternion()

    # UTM conversion
    utm_pos = geodesy.utm.fromLatLong(data.latitude, data.longitude)

    # Initialize starting point if we haven't yet
    if not self.init and self.zero_start:
        self.origin.x = utm_pos.easting
        self.origin.y = utm_pos.northing
        self.origin.z = data.altitude
        self.init = True
        origin_param = {
            "east": self.origin.x,
            "north": self.origin.y,
            "alt": self.origin.z,
        }
        rospy.set_param('/gps_origin', origin_param)

    # Publish origin reference for others to know about
    p = Pose()
    p.position.x = self.origin.x
    p.position.y = self.origin.y
    p.position.z = self.origin.z
    self.pub_origin.publish(p)

    #
    # Odometry
    # TODO: Work out these covariances properly from DOP
    #
    odom = Odometry()
    odom.header.stamp = rospy.Time.now()
    odom.header.frame_id = self.odom_frame
    odom.child_frame_id = self.base_frame
    odom.pose.pose.position.x = utm_pos.easting - self.origin.x
    odom.pose.pose.position.y = utm_pos.northing - self.origin.y
    odom.pose.pose.position.z = data.altitude - self.origin.z
    odom.pose.pose.orientation = Quaternion(*orient)
    odom.pose.covariance = POSE_COVAR

    # Twist is relative to the /reference frame or /vehicle frame and
    # the NED to ENU conversion is needed here too
    odom.twist.twist.linear.x = data.east_vel
    odom.twist.twist.linear.y = data.north_vel
    odom.twist.twist.linear.z = -data.down_vel
    odom.twist.twist.angular.x = RAD(data.ang_rate_long)
    odom.twist.twist.angular.y = RAD(-data.ang_rate_trans)
    odom.twist.twist.angular.z = RAD(-data.ang_rate_down)
    odom.twist.covariance = TWIST_COVAR

    self.pub_odom.publish(odom)

    t_tf = (odom.pose.pose.position.x, odom.pose.pose.position.y,
            odom.pose.pose.position.z)
    q_tf = (Quaternion(*orient).x, Quaternion(*orient).y,
            Quaternion(*orient).z, Quaternion(*orient).w)

    #
    # Odometry transform (if required)
    #
    if self.publish_tf:
        self.tf_broadcast.sendTransform(t_tf, q_tf, odom.header.stamp,
                                        odom.child_frame_id,
                                        odom.header.frame_id)

    #
    # NavSatFix
    # TODO: Work out these covariances properly from DOP
    #
    navsat = NavSatFix()
    navsat.header.stamp = rospy.Time.now()
    navsat.header.frame_id = self.odom_frame
    navsat.status = self.nav_status
    navsat.latitude = data.latitude
    navsat.longitude = data.longitude
    navsat.altitude = data.altitude
    navsat.position_covariance = NAVSAT_COVAR
    navsat.position_covariance_type = NavSatFix.COVARIANCE_TYPE_UNKNOWN
    self.pub_navsatfix.publish(navsat)

    #
    # IMU
    # TODO: Work out these covariances properly
    #
    imu = Imu()
    imu.header.stamp = rospy.Time.now()
    imu.header.frame_id = self.base_frame
    # Orientation
    imu.orientation = Quaternion(*orient)
    imu.orientation_covariance = IMU_ORIENT_COVAR
    # Angular rates
    imu.angular_velocity.x = RAD(data.ang_rate_long)
    imu.angular_velocity.y = RAD(-data.ang_rate_trans)
    imu.angular_velocity.z = RAD(-data.ang_rate_down)
    imu.angular_velocity_covariance = IMU_VEL_COVAR
    # Linear acceleration
    imu.linear_acceleration.x = data.long_accel
    imu.linear_acceleration.y = data.trans_accel
    imu.linear_acceleration.z = data.down_accel
    imu.linear_acceleration_covariance = IMU_ACCEL_COVAR
    self.pub_imu.publish(imu)
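# A quick sanity check of the NED-to-ENU heading conversion used above (a
# standalone sketch, not part of the driver; RAD is assumed to be
# math.radians, matching its use in the handler):
import math
import PyKDL

RAD = math.radians
# An Applanix heading of 0 deg (North, clockwise-positive) should map to a
# yaw of +90 deg in ENU (counterclockwise from East).
q = PyKDL.Rotation.RPY(RAD(0.0), RAD(-0.0), RAD(90 - 0.0)).GetQuaternion()
yaw_enu = PyKDL.Rotation.Quaternion(*q).GetRPY()[2]
assert abs(yaw_enu - math.pi / 2) < 1e-9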
markers = rviz_tools.RvizMarkers('/map', 'visualization_marker')

while not rospy.is_shutdown():

    # Axis:

    # Publish an axis using a numpy transform matrix
    T = transformations.translation_matrix((1, 0, 0))
    axis_length = 0.4
    axis_radius = 0.05
    markers.publishAxis(T, axis_length, axis_radius, 5.0)  # pose, axis length, radius, lifetime

    # Publish an axis using a ROS Pose Msg
    P = Pose(Point(2, 0, 0), Quaternion(0, 0, 0, 1))
    axis_length = 0.4
    axis_radius = 0.05
    markers.publishAxis(P, axis_length, axis_radius, 5.0)  # pose, axis length, radius, lifetime

    # Line:

    # Publish a line between two ROS Point Msgs
    point1 = Point(-2, 1, 0)
    point2 = Point(2, 1, 0)
    width = 0.05
    markers.publishLine(point1, point2, 'green', width, 5.0)  # point1, point2, color, width, lifetime

    # Publish a line between two ROS Poses
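    # The snippet is cut off above; a hedged continuation in the same style,
    # assuming rviz_tools' publishLine also accepts Pose messages, as the
    # rviz_tools_py demo script suggests:
    pose1 = Pose(Point(-2, 1.5, 0), Quaternion(0, 0, 0, 1))
    pose2 = Pose(Point(2, 1.5, 0), Quaternion(0, 0, 0, 1))
    width = 0.05
    markers.publishLine(pose1, pose2, 'red', width, 5.0)  # pose1, pose2, color, width, lifetime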
def __init__(self):
    rospy.init_node('exploring_maze', anonymous=True)
    rospy.on_shutdown(self.shutdown)

    # How long to pause at each target position (seconds)
    self.rest_time = rospy.get_param("~rest_time", 0.5)

    # Publisher used to stop the robot when the node shuts down
    self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)

    # Subscribe to /exploring_cmd, published by tf_move_target.cpp,
    # which encodes the robot's three motion states
    rospy.Subscriber("/exploring_cmd", Int8, self.cmdCallback)

    # Subscribe to the move_base action server
    self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    rospy.loginfo("Waiting for move_base action server...")

    # Wait up to 60 seconds for the action server
    self.move_base.wait_for_server(rospy.Duration(60))
    rospy.loginfo("Connected to move base server")

    # Keep track of the running time
    start_time = rospy.Time.now()
    running_time = 0
    rospy.loginfo("Starting exploring maze")

    # Initial position
    start_location = Pose(Point(0, 0, 0),
                          Quaternion(0.000, 0.000, 0.709016873598, 0.705191515089))

    # Initial command value
    self.exploring_cmd = 0

    # Main loop: navigate to random targets
    while not rospy.is_shutdown():
        # Set up a random goal point
        self.goal = MoveBaseGoal()
        self.goal.target_pose.pose = start_location
        self.goal.target_pose.header.frame_id = 'map'
        self.goal.target_pose.header.stamp = rospy.Time.now()

        if self.exploring_cmd == STATUS_EXPLORING:
            self.goal.target_pose.pose.position.x = random.randint(0, 8)
            self.goal.target_pose.pose.position.y = random.randint(0, 9)
        elif self.exploring_cmd == STATUS_CLOSE_TARGET:
            rospy.sleep(0.1)
            continue
        elif self.exploring_cmd == STATUS_GO_HOME:
            self.goal.target_pose.pose.position.x = 0
            self.goal.target_pose.pose.position.y = 0

        rospy.loginfo("Going to: " + str(self.goal.target_pose.pose))
        self.move_base.send_goal(self.goal)
        finished_within_time = self.move_base.wait_for_result(rospy.Duration(300))

        # Check whether the goal was reached
        if not finished_within_time:
            self.move_base.cancel_goal()
            rospy.loginfo("Timed out achieving goal")
        else:
            state = self.move_base.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("Goal succeeded!")
            else:
                rospy.loginfo("Goal failed")

        # Running time so far
        running_time = rospy.Time.now() - start_time
        running_time = running_time.secs / 60.0

        if self.exploring_cmd == STATUS_GO_HOME:
            break
        else:
            rospy.sleep(self.rest_time)

    # Log a summary of this navigation run
    rospy.loginfo("Current time: " + str(trunc(running_time, 1)) + " min")
    self.shutdown()
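# The callback registered for /exploring_cmd is not shown in this excerpt; a
# minimal hypothetical sketch consistent with how self.exploring_cmd is used:
def cmdCallback(self, msg):
    # msg.data is one of STATUS_EXPLORING / STATUS_CLOSE_TARGET / STATUS_GO_HOME
    rospy.loginfo("Received exploring cmd: " + str(msg.data))
    self.exploring_cmd = msg.data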
def pr2_mover(object_list):
    # Initialize variables
    test_scene_num = Int32()
    object_name = String()
    arm_name = String()
    pick_pose = Pose()
    place_pose = Pose()
    dict_list = []
    test_scene_num.data = 3
    labels = []
    centroids = []
    yaml_filename = 'output_3.yaml'

    # Get/Read parameters
    object_list_param = rospy.get_param('/object_list')
    dropbox_param = rospy.get_param('/dropbox')

    # Compute the label and centroid of each detected object
    for obj in object_list:
        labels.append(obj.label)
        points_arr = ros_to_pcl(obj.cloud).to_array()
        centroids.append(np.mean(points_arr, axis=0)[:3])

    # Loop through the pick list
    for i in range(0, len(object_list_param)):
        object_name.data = object_list_param[i]['name']
        object_group = object_list_param[i]['group']

        # TODO: Rotate PR2 in place to capture side tables for the collision map

        # Use the centroid of the matching detected object as the pick pose
        for j in range(0, len(labels)):
            if object_name.data == labels[j]:
                pick_pose.position.x = np.asscalar(centroids[j][0])
                pick_pose.position.y = np.asscalar(centroids[j][1])
                pick_pose.position.z = np.asscalar(centroids[j][2])

        # Create 'place_pose' for the object from the dropbox parameters
        for j in range(0, len(dropbox_param)):
            if object_group == dropbox_param[j]['group']:
                place_pose.position.x = dropbox_param[j]['position'][0]
                place_pose.position.y = dropbox_param[j]['position'][1]
                place_pose.position.z = dropbox_param[j]['position'][2]

        # Assign the arm to be used for pick_place
        if object_group == 'green':
            arm_name.data = 'right'
        elif object_group == 'red':
            arm_name.data = 'left'

        # Build a list of dictionaries (made with make_yaml_dict())
        # for later output to yaml format
        yaml_dict = make_yaml_dict(test_scene_num, arm_name, object_name,
                                   pick_pose, place_pose)
        dict_list.append(yaml_dict)

        # Wait for the 'pick_place_routine' service to come up
        rospy.wait_for_service('pick_place_routine')
        try:
            pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
            # Send the message variables as a service request
            resp = pick_place_routine(test_scene_num, arm_name, object_name,
                                      pick_pose, place_pose)
            print("Response: ", resp.success)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
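# dict_list and yaml_filename are built above but never written out in this
# excerpt; in the RoboND perception project this is typically done with a
# helper like the following (a sketch under that assumption):
import yaml

def send_to_yaml(yaml_filename, dict_list):
    data_dict = {"object_list": dict_list}
    with open(yaml_filename, 'w') as outfile:
        yaml.dump(data_dict, outfile, default_flow_style=False)

# e.g. at the end of pr2_mover():
#     send_to_yaml(yaml_filename, dict_list)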
#publisher_marker = rospy.Publisher('/visualization_marker', Marker, queue_size=10)
publisher_marray = rospy.Publisher('/upper_body_detector/marker_array',
                                   MarkerArray, queue_size=10)

rate = rospy.Rate(4.9)
counter = 0
people_inside = False

while not rospy.is_shutdown():
    j = PoseArray()
    j.header = Header()
    #j.header.stamp = {'secs': datetime.datetime.now().time().second,
    #                  'nsecs': datetime.datetime.now().time().microsecond}
    now = rospy.Time.now()
    j.header.stamp.secs = now.secs
    j.header.stamp.nsecs = now.nsecs
    j.header.frame_id = '/camera_link'

    pose1 = Pose()
    pose1.position.x = now.secs % 10
    pose1.position.y = 0.1
    pose1.position.z = 0.9
    pose1.orientation.w = 1.0

    pose2 = Pose()
    pose2.position.x = 1
    pose2.position.y = -0.1
    pose2.position.z = 0.9
    pose2.orientation.w = 1.0

    markerPeople = Marker()
    markerPeople.header = Header()
    markerPeople.header.stamp.secs = now.secs
    markerPeople.header.stamp.nsecs = now.nsecs
    markerPeople.header.frame_id = '/camera_link'
def __init__(self): """ Initialize the SCARA environemnt NOTE: This environment uses ROS and interfaces. TODO: port everything to ROS 2 natively """ # Launch the simulation with the given launchfile name gazebo_env.GazeboEnv.__init__(self, "MAIRATop3DOF_v0.launch") # TODO: cleanup this variables, remove the ones that aren't used # class variables self._observation_msg = None self.scale = None # must be set from elsewhere based on observations self.bias = None self.x_idx = None self.obs = None self.reward = None self.done = None self.reward_dist = None self.reward_ctrl = None self.action_space = None self.max_episode_steps = 1000 # now used in all algorithms self.iterator = 0 # default to seconds self.slowness = 1 self.slowness_unit = 'sec' self.reset_jnts = True self._time_lock = threading.RLock() ############################# # Environment hyperparams ############################# # target, where should the agent reach EE_POS_TGT = np.asmatrix([-0.4, 0.0, 1.1013]) # 200 cm from the z axis # EE_POS_TGT = np.asmatrix([0.0, 0.001009, 1.64981]) # EE_POS_TGT = np.asmatrix([0.3305805, -0.1326121, 0.4868]) # center of the H # EE_ROT_TGT = np.asmatrix([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]) EE_ROT_TGT = np.asmatrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) EE_POINTS = np.asmatrix([[0, 0, 0]]) EE_VELOCITIES = np.asmatrix([[0, 0, 0]]) # Initial joint position INITIAL_JOINTS = np.array([0., 0., 0., 0., 0, 0]) # Used to initialize the robot, #TODO, clarify this more # STEP_COUNT = 2 # Typically 100. # slowness = 10000000 # 10 ms, where 1 second is real life simulation # slowness = 1000000 # 1 ms, where 1 second is real life simulation # slowness = 1 # use >10 for running trained network in the simulation # slowness = 10 # use >10 for running trained network in the simulation # Topics for the robot publisher and subscriber. 
    JOINT_PUBLISHER = '/maira_controller/command'
    JOINT_SUBSCRIBER = '/maira_controller/state'

    # joint names:
    MOTOR1_JOINT = 'motor1'
    MOTOR2_JOINT = 'motor2'
    MOTOR3_JOINT = 'motor3'
    MOTOR4_JOINT = 'motor4'
    MOTOR5_JOINT = 'motor5'
    MOTOR6_JOINT = 'motor6'

    # Set constants for links
    TABLE = 'table'
    BASE = 'base_link'
    MAIRA_MOTOR1_LINK = 'motor1_link'
    MAIRA_MOTOR2_LINK = 'motor2_link'
    MAIRA_MOTOR3_LINK = 'motor3_link'
    MAIRA_MOTOR4_LINK = 'motor4_link'
    MAIRA_MOTOR5_LINK = 'motor5_link'
    MAIRA_MOTOR6_LINK = 'motor6_link'
    EE_LINK = 'ee_link'

    JOINT_ORDER = [MOTOR1_JOINT, MOTOR2_JOINT, MOTOR3_JOINT,
                   MOTOR4_JOINT, MOTOR5_JOINT, MOTOR6_JOINT]
    LINK_NAMES = [TABLE, BASE,
                  MAIRA_MOTOR1_LINK, MAIRA_MOTOR2_LINK, MAIRA_MOTOR3_LINK,
                  MAIRA_MOTOR4_LINK, MAIRA_MOTOR5_LINK, MAIRA_MOTOR6_LINK,
                  EE_LINK]

    reset_condition = {
        'initial_positions': INITIAL_JOINTS,
        'initial_velocities': []
    }
    #############################

    # TODO: fix this and make it relative
    # Set the path of the corresponding URDF file from "assets"
    URDF_PATH = rospkg.RosPack().get_path("maira_description") + \
        "/urdf/maira_demo_camera_top.urdf"

    m_joint_order = copy.deepcopy(JOINT_ORDER)
    m_link_names = copy.deepcopy(LINK_NAMES)
    m_joint_publishers = copy.deepcopy(JOINT_PUBLISHER)
    m_joint_subscribers = copy.deepcopy(JOINT_SUBSCRIBER)
    ee_pos_tgt = EE_POS_TGT
    ee_rot_tgt = EE_ROT_TGT

    # Initialize target end effector position
    ee_tgt = np.ndarray.flatten(get_ee_points(EE_POINTS, ee_pos_tgt, ee_rot_tgt).T)
    self.realgoal = ee_tgt
    self.target_orientation = ee_rot_tgt

    self.environment = {
        # rk changed this for the mlsh
        # 'ee_points_tgt': ee_tgt,
        'ee_points_tgt': self.realgoal,
        'ee_point_tgt_orient': self.target_orientation,
        'joint_order': m_joint_order,
        'link_names': m_link_names,
        # 'slowness': slowness,
        'reset_conditions': reset_condition,
        'tree_path': URDF_PATH,
        'joint_publisher': m_joint_publishers,
        'joint_subscriber': m_joint_subscribers,
        'end_effector_points': EE_POINTS,
        'end_effector_velocities': EE_VELOCITIES,
    }
    # self.spec = {'timestep_limit': 5, 'reward_threshold': 950.0}

    # Subscribe to the appropriate topics for the particular robot
    # ROS 1 implementation
    self._pub = rospy.Publisher(JOINT_PUBLISHER, JointTrajectory)
    self._sub = rospy.Subscriber(JOINT_SUBSCRIBER, JointTrajectoryControllerState,
                                 self.observation_callback)

    # Initialize a tree structure from the robot urdf.
    # Note that the xacro of the urdf is updated by hand.
    # The urdf must be compiled.
    _, self.ur_tree = treeFromFile(self.environment['tree_path'])
    # Retrieve a chain structure between the base and the start of the end effector.
    self.scara_chain = self.ur_tree.getChain(self.environment['link_names'][0],
                                             self.environment['link_names'][-1])
    # print("nr of jnts: ", self.scara_chain.getNrOfJoints())

    # Initialize a KDL Jacobian solver from the chain.
    self.jac_solver = ChainJntToJacSolver(self.scara_chain)
    #print(self.jac_solver)
    self._observations_stale = [False for _ in range(1)]
    #print("after observations stale")
    self._currently_resetting = [False for _ in range(1)]
    self.reset_joint_angles = [None for _ in range(1)]

    # TODO review with Risto, we might need the first observation for calling _step()
    # observation = self.take_observation()
    # assert not done
    # self.obs_dim = observation.size
    """
    obs_dim is defined as:
    num_dof + end_effector_points=3 + end_effector_velocities=3
    end_effector_points and end_effector_velocities are constant and equal 3;
    recently a quaternion was also added to the obs, which has dimension=4
    """
    # hardcoded for now (6 joints + 10); without this assignment the
    # observation_space below cannot be built
    self.obs_dim = self.scara_chain.getNrOfJoints() + 10

    # Ideally we should find the control range of the robot here.
    # Unfortunately in ROS/KDL there is nothing like this.
    # Tested with the mujoco environment: the output is always the same,
    # low[-1., -1.], high[1., 1.]
    # bounds = self.model.actuator_ctrlrange.copy()
    low = -np.pi / 2.0 * np.ones(self.scara_chain.getNrOfJoints())
    high = np.pi / 2.0 * np.ones(self.scara_chain.getNrOfJoints())
    # low = -np.pi * np.ones(self.scara_chain.getNrOfJoints())
    # high = np.pi * np.ones(self.scara_chain.getNrOfJoints())
    # low = -np.inf * np.ones(self.scara_chain.getNrOfJoints())
    # high = np.inf * np.ones(self.scara_chain.getNrOfJoints())
    # print("Action spaces: ", low, high)
    self.action_space = spaces.Box(low, high)

    high = np.inf * np.ones(self.obs_dim)
    low = -high
    self.observation_space = spaces.Box(low, high)

    self.add_model = rospy.ServiceProxy('/gazebo/spawn_urdf_model', SpawnModel)
    self.remove_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)

    model_xml = "<?xml version=\"1.0\"?> \
                 <robot name=\"myfirst\"> \
                 <link name=\"world\"> \
                 </link>\
                 <link name=\"cylinder0\">\
                 <visual>\
                 <geometry>\
                 <sphere radius=\"0.01\"/>\
                 </geometry>\
                 <origin xyz=\"0 0 0\"/>\
                 <material name=\"rojotransparente\">\
                 <ambient>0.5 0.5 1.0 0.1</ambient>\
                 <diffuse>0.5 0.5 1.0 0.1</diffuse>\
                 </material>\
                 </visual>\
                 <inertial>\
                 <mass value=\"5.0\"/>\
                 <inertia ixx=\"1.0\" ixy=\"0.0\" ixz=\"0.0\" iyy=\"1.0\" iyz=\"0.0\" izz=\"1.0\"/>\
                 </inertial>\
                 </link>\
                 <joint name=\"world_to_base\" type=\"fixed\"> \
                 <origin xyz=\"0 0 0\" rpy=\"0 0 0\"/>\
                 <parent link=\"world\"/>\
                 <child link=\"cylinder0\"/>\
                 </joint>\
                 <gazebo reference=\"cylinder0\">\
                 <material>Gazebo/GreenTransparent</material>\
                 </gazebo>\
                 </robot>"

    robot_namespace = ""
    pose = Pose()
    pose.position.x = EE_POS_TGT[0, 0]
    pose.position.y = EE_POS_TGT[0, 1]
    pose.position.z = EE_POS_TGT[0, 2]

    # Static obstacle (not in original code)
    # pose.position.x = 0.25
    # pose.position.y = 0.07
    # pose.position.z = 0.0

    pose.orientation.x = 0
    pose.orientation.y = 0
    pose.orientation.z = 0
    # note: an all-zero quaternion is not normalized; Gazebo tolerates it for
    # this fixed marker, but w=1 would be the identity orientation
    pose.orientation.w = 0

    reference_frame = ""
    rospy.wait_for_service('/gazebo/spawn_urdf_model')
    self.add_model(model_name="target",
                   model_xml=model_xml,
                   robot_namespace="",
                   initial_pose=pose,
                   reference_frame="")

    # Seed the environment
    self._seed()
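# The subscriber above registers self.observation_callback, which is outside
# this excerpt; a minimal hypothetical version matching how
# self._observation_msg is initialized and presumably consumed elsewhere:
def observation_callback(self, message):
    # cache the latest JointTrajectoryControllerState for take_observation()
    self._observation_msg = message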
def start(self):
    pose = Pose(orientation=Quaternion(0, 0, 1, 1))
    gripper_im = create_gripper_interactive_marker(pose, pregrasp=False)
    self._im_server.insert(gripper_im, feedback_cb=self.handle_feedback)
    self._im_server.applyChanges()
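# handle_feedback is registered above but not shown; a hedged minimal sketch
# of the callback signature the InteractiveMarkerServer expects:
def handle_feedback(self, feedback):
    # feedback is an InteractiveMarkerFeedback; just log pose updates for now
    rospy.loginfo('Gripper marker moved to: ' + str(feedback.pose))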
def main(): """RSDK Inverse Kinematics Pick and Place Example A Pick and Place example using the Rethink Inverse Kinematics Service which returns the joint angles a requested Cartesian Pose. This ROS Service client is used to request both pick and place poses in the /base frame of the robot. Note: This is a highly scripted and tuned demo. The object location is "known" and movement is done completely open loop. It is expected behavior that Baxter will eventually mis-pick or drop the block. You can improve on this demo by adding perception and feedback to close the loop. """ rospy.init_node("ik_pick_and_place_demo") # Load Gazebo Models via Spawning Services # Note that the models reference is the /world frame # and the IK operates with respect to the /base frame load_gazebo_models() # Remove models from the scene on shutdown rospy.on_shutdown(delete_gazebo_models) # Wait for the All Clear from emulator startup rospy.wait_for_message("/robot/sim/started", Empty) limb = 'left' hover_distance = 0.15 # meters # Starting Joint angles for left arm starting_joint_angles = { 'left_w0': 0.6699952259595108, 'left_w1': 1.030009435085784, 'left_w2': -0.4999997247485215, 'left_e0': -1.189968899785275, 'left_e1': 1.9400238130755056, 'left_s0': -0.08000397926829805, 'left_s1': -0.9999781166910306 } pnp = PickAndPlace(limb, hover_distance) # An orientation for gripper fingers to be overhead and parallel to the obj overhead_orientation = Quaternion(x=-0.0249590815779, y=0.999649402929, z=0.00737916180073, w=0.00486450832011) red_path = [] red_pick = Pose(position=Point(x=0.78, y=0.052, z=-0.17), orientation=overhead_orientation) red_path.append( Pose(position=Point(x=0.779563403556, y=0.0468261425935, z=0.156573808561), orientation=overhead_orientation)) red_path.append( Pose(position=Point(x=0.75327939756, y=0.588635078274, z=0.166430469606), orientation=overhead_orientation)) red_path.append( Pose(position=Point(x=0.342029905909, y=0.568679958534, z=0.164408812494), orientation=overhead_orientation)) red_path.append( Pose(position=Point(x=0.196601394977, y=0.972433777558, z=0.171094782565), orientation=overhead_orientation)) red_path.append( Pose(position=Point(x=0.00949742588346, y=1.02707664241, z=0.171318553432), orientation=overhead_orientation)) red_place = Pose(position=Point(x=0.00949742588346, y=1.02707664241, z=-0.17), orientation=overhead_orientation) # Move to the desired starting angles pnp.move_to_start(starting_joint_angles) pnp.pick(red_pick) for path in red_path: pnp._servo_to_pose(path) pnp.place(red_place) white_path = [] white_pick = Pose(position=Point(x=0.33, y=0.8235, z=-0.16), orientation=overhead_orientation) white_path.append( Pose(position=Point(x=0.327241727039, y=0.825381946329, z=0.1), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.327241727039, y=0.825381946329, z=0.196014205072), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.327241727039, y=0.825381946329, z=0.199), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.599122653199, y=0.83927825659, z=0.200216051149), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.671941591494, y=0.691381023315, z=0.200888181225), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.797319017312, y=0.539403288689, z=0.237312486625), orientation=overhead_orientation)) white_path.append( Pose(position=Point(x=0.797179252216, y=0.377318604737, z=0.251622162923), orientation=overhead_orientation)) 
white_path.append( Pose(position=Point(x=0.794511987383, y=0.120989091685, z=0.259153455), orientation=overhead_orientation)) white_place = Pose(position=Point(x=0.794511987383, y=0.120989091685, z=-0.17), orientation=overhead_orientation) pnp.pick(white_pick) for path in white_path: pnp._servo_to_pose(path) pnp.place(white_place)
def __init__(self):
    self.object_name = "box"
    self.object_width = 0.03
    self.arm_group = "irb_120"
    self.gripper_group = "robotiq_85"

    self.arm = moveit_commander.MoveGroupCommander("irb_120")
    self.gripper = moveit_commander.MoveGroupCommander("robotiq_85")
    self.scene = PlanningSceneInterface()
    self.robot = RobotCommander()
    rospy.sleep(1.0)

    self.scene.remove_world_object(self.object_name)
    self.pose_object = self.add_object(self.object_name)
    rospy.sleep(1.0)
    self.pose_object.position.z += 0.16
    print(self.pose_object.position)

    self.pose_place = Pose()
    self.pose_place.position.x = self.pose_object.position.x
    self.pose_place.position.y = self.pose_object.position.y - 0.1
    self.pose_place.position.z = 0  # self.pose_object.position.z - 0.1
    self.pose_place.orientation = Quaternion(*quaternion_from_euler(0.0, 0.0, 0.0))

    self.approach_retreat_desired_dist = 0.2
    self.approach_retreat_min_dist = 0.1

    #self.grasp_action = SimpleActionClient('/moveit_simple_grasps_server/generate', GenerateGraspsAction)
    # self.pickup_action = SimpleActionClient("/pickup", PickupAction)
    # self.place_action = SimpleActionClient("/place", PlaceAction)

    print("start pick and place")

    # Pick
    # while not self.pickup(self.arm_group, self.object_name, self.object_width):
    #     rospy.logwarn('Pick up failed! Retrying ...')
    #     rospy.sleep(1.0)
    grasps = self.generate_grasps(self.pose_object, self.object_width)
    self.arm.pick(self.object_name, grasps)
    rospy.loginfo('Picked up successfully')
    rospy.sleep(1)
    # self.arm.detach_object(self.object_name)
    # self.clean_scene()
    # print("pose_place: ", self.pose_place)

    # Place
    # while not self.place(self.arm_group, self.object_name, self.pose_place):
    #     rospy.logwarn('Place failed! Retrying ...')
    #     rospy.sleep(1.0)
    place = self.generate_places(self.pose_place)
    self.arm.place(self.object_name, place)
    rospy.loginfo('Placed successfully')

    self.move_joint_hand(0)
    rospy.loginfo("Moving arm to HOME point")
    self.move_pose_arm(0, 0.8, 0, 0.4, 0, 0.6)
    rospy.sleep(1)
    self.arm.detach_object(self.object_name)
    self.clean_scene()
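# generate_places() is called above but not shown; one plausible minimal
# implementation returning a list of moveit_msgs PlaceLocations (a
# hypothetical sketch; the field choices are assumptions):
from moveit_msgs.msg import PlaceLocation

def generate_places(self, target_pose):
    place = PlaceLocation()
    place.place_pose.header.frame_id = self.robot.get_planning_frame()
    place.place_pose.pose = target_pose
    return [place]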
def __init__(self):
    # read settings
    self._samples_before_reset = rospy.get_param('~samples_before_reset', 50)
    self._send_reset_automatically = rospy.get_param('~send_reset_automatically', False)
    # this is a crucial node, be verbose by default
    self._verbose = rospy.get_param('~verbose', True)

    # quaternion from IMU of the camera-sensor (C frame) to IMU of the MAV (I frame)
    q_I_C_w = rospy.get_param('~pose_sensor/init/q_ic/w', 1.0)
    q_I_C_x = rospy.get_param('~pose_sensor/init/q_ic/x', 0.0)
    q_I_C_y = rospy.get_param('~pose_sensor/init/q_ic/y', 0.0)
    q_I_C_z = rospy.get_param('~pose_sensor/init/q_ic/z', 0.0)
    q_I_C = [q_I_C_x, q_I_C_y, q_I_C_z, q_I_C_w]

    # position of the IMU of the camera-sensor (C frame) from the IMU of the MAV (I frame)
    # yes, it's the other way around with respect to the quaternion ...
    I_p_I_C_x = rospy.get_param('~pose_sensor/init/p_ic/x', 0.0)
    I_p_I_C_y = rospy.get_param('~pose_sensor/init/p_ic/y', 0.0)
    I_p_I_C_z = rospy.get_param('~pose_sensor/init/p_ic/z', 0.0)
    I_p_I_C = [I_p_I_C_x, I_p_I_C_y, I_p_I_C_z]

    # position of the GPS antenna (V frame, according to Rovio)
    # with respect to the MAV IMU (I frame, according to MSF)
    self._I_p_I_V = rospy.get_param('~mavimu_p_mavimu_gps', [0.0, 0.0, 0.0])

    # full transformation from MAV IMU (I) to sensor IMU (C):
    # do the translation first and then the rotation!
    self._T_I_C = tf.concatenate_matrices(tf.translation_matrix(I_p_I_C),
                                          tf.quaternion_matrix(q_I_C))
    if self._verbose:
        rospy.loginfo(rospy.get_name() +
                      ": transformation between vi-sensor IMU and MAV IMU [x, y, z, w]: " +
                      str(q_I_C))

    if self._send_reset_automatically:
        rospy.loginfo(rospy.get_name() + ": reset will be sent after " +
                      str(self._samples_before_reset) + " IMU messages")
    else:
        rospy.loginfo(rospy.get_name() +
                      ": reset has to be sent by calling service 'send_reset_to_rovio'")

    # init other variables
    self._num_imu_msgs_read = 0
    self._num_gps_transform_msgs_read = 0
    self._Enu_p_Enu_V = [0.0, 0.0, 0.0]
    self._automatic_rovio_reset_sent_once = False
    self._pose_world_imu_msg = Pose()
    self._T_Enu_I = tf.identity_matrix()

    # allow testing in Vicon
    testing_in_vicon = rospy.get_param('~testing_in_vicon', False)
    if testing_in_vicon:
        # manually set this parameter if testing in Vicon;
        # it will be overwritten in any case after the first external GPS measurement
        self._Enu_p_Enu_V = rospy.get_param('~vicon_p_vicon_gps', [0.0, 0.0, 0.0])
        self._num_gps_transform_msgs_read = 1

    # advertise service
    self._reset_rovio_srv_server = rospy.Service(
        rospy.get_name() + '/send_reset_to_rovio',
        std_srvs.srv.Trigger,
        self.send_reset_to_rovio_service_callback)

    # subscribe to the Imu topic which contains the yaw orientation
    rospy.Subscriber(rospy.get_name() + "/mag_imu", Imu, self.mag_imu_callback)

    # use either gps_transform or gps_pose
    rospy.Subscriber(rospy.get_name() + "/gps_transform", TransformStamped,
                     self.gps_transform_callback)
    rospy.Subscriber(rospy.get_name() + "/gps_pose", PoseWithCovarianceStamped,
                     self.gps_pose_callback)

    rospy.spin()
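# The Trigger service advertised above is handled by a callback along these
# lines (a hypothetical sketch; the real handler also performs the actual
# reset before reporting success):
def send_reset_to_rovio_service_callback(self, request):
    response = std_srvs.srv.TriggerResponse()
    response.success = True  # set to False if the reset could not be sent
    response.message = "Reset sent to Rovio"
    return response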
def setPose(self):
    # get the goal from the command line
    for i in range(0, len(sys.argv)):
        if sys.argv[i] == '-update_rate':
            if len(sys.argv) > i + 1:
                self.update_rate = float(sys.argv[i + 1])
        if sys.argv[i] == '-timeout':
            if len(sys.argv) > i + 1:
                self.timeout = float(sys.argv[i + 1])
        if sys.argv[i] == '-x':
            if len(sys.argv) > i + 1:
                self.target_p[0] = float(sys.argv[i + 1])
        if sys.argv[i] == '-y':
            if len(sys.argv) > i + 1:
                self.target_p[1] = float(sys.argv[i + 1])
        if sys.argv[i] == '-z':
            if len(sys.argv) > i + 1:
                self.target_p[2] = float(sys.argv[i + 1])
        if sys.argv[i] == '-R':
            if len(sys.argv) > i + 1:
                self.target_e[0] = float(sys.argv[i + 1])
        if sys.argv[i] == '-P':
            if len(sys.argv) > i + 1:
                self.target_e[1] = float(sys.argv[i + 1])
        if sys.argv[i] == '-Y':
            if len(sys.argv) > i + 1:
                self.target_e[2] = float(sys.argv[i + 1])
        if sys.argv[i] == '-f':
            if len(sys.argv) > i + 1:
                self.frame_id = sys.argv[i + 1]
        if sys.argv[i] == '-s':
            if len(sys.argv) > i + 1:
                self.service_name = sys.argv[i + 1]
                self.use_service = True
        if sys.argv[i] == '-t':
            if len(sys.argv) > i + 1:
                self.topic_name = sys.argv[i + 1]
                self.use_topic = True
        if sys.argv[i] == '-p':
            if len(sys.argv) > i + 1:
                self.wait_topic_name = sys.argv[i + 1]
                self.wait_for_topic = True

    # set up rospy
    self.pub_set_pose_topic = rospy.Publisher(self.topic_name, Odometry)
    rospy.Subscriber(self.wait_topic_name, rospy.AnyMsg, self.waitTopicInput)

    # wait for the topic if the user requests it
    if self.wait_for_topic:
        while not self.wait_topic_initialized:
            time.sleep(0.1)

    # compose the goal message
    h = rospy.Header()
    h.stamp = rospy.get_rostime()
    h.frame_id = self.frame_id
    p = Point(self.target_p[0], self.target_p[1], self.target_p[2])
    tmpq = tft.quaternion_from_euler(self.target_e[0], self.target_e[1],
                                     self.target_e[2])
    q = Quaternion(tmpq[0], tmpq[1], tmpq[2], tmpq[3])
    pose = Pose(p, q)
    pwc = PoseWithCovariance(pose, COV)
    twc = TwistWithCovariance(Twist(Vector3(), Vector3()), COV)
    child_frame_id = ""  # what should this be?
    target_pose = Odometry(h, child_frame_id, pwc, twc)

    if self.use_service:
        success = self.setPoseService(target_pose)

    # publish the topic if specified
    if self.use_topic:
        timeout_t = time.time() + self.timeout
        while not rospy.is_shutdown() and time.time() < timeout_t:
            # publish target pose
            self.pub_set_pose_topic.publish(target_pose)
            if self.update_rate > 0:
                time.sleep(1.0 / self.update_rate)
            else:
                time.sleep(0.001)
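# Example invocation (a hedged sketch; the package and script names here are
# hypothetical, only the flags parsed above are real):
#
#   rosrun my_pkg set_pose.py -x 1.0 -y 2.0 -Y 1.57 -f map -t /set_pose -update_rate 10
#
# This publishes an Odometry goal at (1, 2) with a 90-degree yaw on /set_pose
# for self.timeout seconds.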
def get_particle_msgs(pfilter, time):
    """
    Create messages to visualize particle filters.

    The first message contains all particles. The second message contains
    the particle representing the whole filter.

    :param ParticleFilter pfilter: the particle filter.
    :param rospy.Time time: the timestamp for the message.
    :returns: a list of messages containing [all the particle positions,
        the mean particle, the mean odometry, the translation to the mean
        particle, the rotation to the mean particle, the weights]
    :rtype: :py:obj:`list`
    """
    # Pose array
    msg = PoseArray()
    msg.header.stamp = time
    msg.header.frame_id = 'world'

    # Weights as spheres
    msg_weight = MarkerArray()
    idx = 0
    wmax = pfilter.p_wei.max()
    wmin = pfilter.p_wei.min()
    if wmax == wmin:
        wmin = 0.0

    for i in range(pfilter.num):
        # Pose
        m = Pose()
        m.position.x = pfilter.p_xy[0, i]
        m.position.y = pfilter.p_xy[1, i]
        m.position.z = 0
        quat = quaternion_from_euler(0, 0, pfilter.p_ang[i])
        m.orientation.x = quat[0]
        m.orientation.y = quat[1]
        m.orientation.z = quat[2]
        m.orientation.w = quat[3]

        # Append
        msg.poses.append(m)

        # Marker constants
        marker = Marker()
        marker.header.frame_id = "world"
        marker.header.stamp = time
        marker.ns = "weights"
        marker.type = marker.SPHERE
        marker.action = marker.ADD
        marker.color.a = 0.5
        marker.color.r = 1.0
        marker.color.g = 0.55
        marker.color.b = 0.0

        # Marker variables
        marker.id = idx
        marker.pose.position.x = pfilter.p_xy[0, i]
        marker.pose.position.y = pfilter.p_xy[1, i]
        marker.pose.position.z = 0
        marker.pose.orientation.x = quat[0]
        marker.pose.orientation.y = quat[1]
        marker.pose.orientation.z = quat[2]
        marker.pose.orientation.w = quat[3]

        scale = 0.005 + 0.08 * (pfilter.p_wei[i] - wmin) / (wmax - wmin)
        marker.scale.x = scale
        marker.scale.y = scale
        marker.scale.z = 0.02
        idx += 1
        msg_weight.markers.append(marker)

    # Pose stamped
    msg_mean = PoseStamped()
    msg_mean.header.stamp = time
    msg_mean.header.frame_id = 'world'
    particle = pfilter.get_mean_particle()
    msg_mean.pose.position.x = particle[0]
    msg_mean.pose.position.y = particle[1]
    msg_mean.pose.position.z = 0
    quat = quaternion_from_euler(0, 0, particle[2])
    msg_mean.pose.orientation.x = quat[0]
    msg_mean.pose.orientation.y = quat[1]
    msg_mean.pose.orientation.z = quat[2]
    msg_mean.pose.orientation.w = quat[3]

    # Odometry
    msg_odom = Odometry()
    msg_odom.header.stamp = time
    msg_odom.header.frame_id = 'world'
    msg_odom.pose.pose.position = msg_mean.pose.position
    msg_odom.pose.pose.orientation = msg_mean.pose.orientation

    # TF
    trans = (msg_odom.pose.pose.position.x, msg_odom.pose.pose.position.y,
             msg_odom.pose.pose.position.z)
    rotat = (msg_odom.pose.pose.orientation.x,
             msg_odom.pose.pose.orientation.y,
             msg_odom.pose.pose.orientation.z,
             msg_odom.pose.pose.orientation.w)

    return msg, msg_mean, msg_odom, trans, rotat, msg_weight
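# A short usage sketch (an assumed caller, not in the original excerpt):
# publish the returned messages and broadcast the mean particle as a TF frame.
# The topic and child frame names are assumptions.
import tf

pub_particles = rospy.Publisher('particles', PoseArray, queue_size=1)
pub_mean = rospy.Publisher('mean_particle', PoseStamped, queue_size=1)
tf_br = tf.TransformBroadcaster()

def publish_filter(pfilter):
    time = rospy.Time.now()
    msg, msg_mean, msg_odom, trans, rotat, msg_weight = get_particle_msgs(pfilter, time)
    pub_particles.publish(msg)
    pub_mean.publish(msg_mean)
    # 'world' matches the frame_id filled into the headers above
    tf_br.sendTransform(trans, rotat, time, 'base_footprint', 'world')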
def draw_blob(l_vectors, fromZeroZero):
    # start_p = [inorder[0], [from 00 to minY,minX]]
    global g_limb, g_position_neutral, g_orientation_hand_down, pos, posp
    global gripper
    global marker_p, marker_q
    global square_p, square_q

    rospy.sleep(2)
    gripper.open()

    marker_pose = Pose()
    marker_pose.position = marker_p
    marker_pose.orientation = marker_q

    fancy_pose = Pose()
    fancy_pose.position = square_p
    fancy_pose.orientation = square_q

    move_to(marker_pose, 0.3, 5)
    rospy.sleep(2.0)
    gripper.close()
    rospy.sleep(2)

    marker_pose.position.z += 0.1
    move_to(marker_pose, 0.2, 5)
    rospy.sleep(1)

    fancy_pose.position.z += 0.1
    move_to(fancy_pose, 0.3, 5)

    cur_pose = Pose()
    cur_pose.orientation = fancy_pose.orientation
    cur_pose.position = fancy_pose.position
    print("In draw blob")

    # runs exactly once; the loop only exists so we can break out early
    while True:
        # calculate the new position (z stays unchanged)
        cur_pose.position.x = fancy_pose.position.x + math.sin(fromZeroZero[0]) * fromZeroZero[1]
        cur_pose.position.y = fancy_pose.position.y + math.cos(fromZeroZero[0]) * fromZeroZero[1]

        # move to the first pose
        move_to(cur_pose, 0.15, 5)
        cur_pose.position.z -= 0.21
        move_to(cur_pose, 0.15, 5)

        for i in range(0, len(l_vectors) - 1):
            cur_pose.position.x = fancy_pose.position.x + math.sin(l_vectors[i][0]) * l_vectors[i][1]
            cur_pose.position.y = fancy_pose.position.y + math.cos(l_vectors[i][0]) * l_vectors[i][1]
            move_to(cur_pose, 0.15, 5)
        break
# still problems with the planner in here
# move arm in front of bin before looking for the object
sm.add('move_arm_to_bin', MoveRobotState(movegroup='left_arm'),
       transitions={'succeeded': 'turn_off_the_shelf_collision_scene',
                    'failed': 'abort_state'})

sm.add('turn_off_the_shelf_collision_scene',
       ToggleBinFillersAndTote(action='unfill_bins'),
       transitions={'succeeded': 'move_sucker_in',
                    'failed': 'abort_state'})

# TODO: probably would need to look into the bin to see where in the bin there is space :)
# move the object into the shelf bin
sm.add('move_sucker_in',
       MoveRobotToRelativePose(movegroup='left_arm',
                               pose_frame_id='/shelf',  # TODO: fix that based on the position we are at!
                               relative_pose=Pose(position=Point(0, 0, relative_movement),
                                                  orientation=Quaternion(0, 0, 0, 1)),
                               velocity_scale=0.5),
       transitions={'succeeded': 'deactivate_suction',
                    'failed': 'abort_state'})

# deactivate suction to let go of the object
sm.add('deactivate_suction', SuctionState(state='off', movegroup=None),
       transitions={'succeeded': 'remove_object_collision',
                    'failed': 'abort_state'})

sm.add('remove_object_collision', UpdateCollisionState(action='detach'),
       transitions={'succeeded': 'move_sucker_back_out'})

# sm.add('remove_current_pick', PopItemState(),
#        transitions={'succeeded': 'move_to_neutral'})

sm.add('move_sucker_back_out', MoveRobotToRelativePose(
def update_odom(self):
    t2 = rospy.Time.now()
    t1 = self.prev_time
    dt = (t2 - t1).to_sec()

    gyro_thresh = 0.01  # 0.05
    g_bias = 0.007
    MAX_DTHETA_GYRO = 5

    # Read the gyro z rate from the IMU Arduino
    try:
        self.ardIMU.safe_write('A5/6/')
        gyroz = self.ardIMU.safe_read()
        #c = 0
        #print("gyro z:")
        #print(gyroz)
        #while gyroz == '' and c < 10:
        #    c = c + 1
        #    gyroz = ardIMU.safe_read()
        #    print('try gyro again')
        gyroz = float(int(gyroz) - 1000) / 100.0
    except:
        gyroz = 0
        print 'IMU error'
        print "Unexpected Error:", sys.exc_info()[0]

    # Read the encoder delta
    try:
        self.ard.safe_write('A3/4/')
        s = self.ard.safe_read()
        #print("enc: ")
        #print(s)
        #c = 0
        #while s == '' and c < 10:
        #    c = c + 1
        #    s = ardIMU.safe_read()
        #    print('try enc again')
        delta_enc_counts = int(s)
    except:
        delta_enc_counts = 0
        print 'enc error'
        print "Unexpected Error:", sys.exc_info()[0]

    # Update odom
    self.enc_total = self.enc_total + delta_enc_counts
    dmeters = float(delta_enc_counts) / 53.0  # 53 counts/meter

    if abs(gyroz + g_bias) < gyro_thresh:
        gz_dps = 0
        dtheta_gyro_deg = 0
    else:
        gz_dps = (gyroz + g_bias) * 180 / 3.14
        #if gz_dps > 0:
        #    gz_dps = gz_dps * 1.2
        #else:
        #    gz_dps = gz_dps * 1.1
        dtheta_gyro_deg = gz_dps * dt

    if abs(dtheta_gyro_deg) > MAX_DTHETA_GYRO:
        #print 'no gyro'
        dtheta_deg = 0
        use_gyro_flag = False
    else:
        #print 'use gyro'
        dtheta_deg = dtheta_gyro_deg
        use_gyro_flag = True

    # update bot position
    self.bot.move(dmeters, dtheta_deg, use_gyro_flag)
    self.bot.servo_deg = 0

    # update bot linear x velocity every 150 msec
    # need to use an np array, then push and pop, moving average
    self.dist_sum = self.dist_sum + dmeters
    self.time_sum = self.time_sum + dt
    if self.time_sum > 0.15:
        self.vx = self.dist_sum / self.time_sum
        self.dist_sum = 0
        self.time_sum = 0

    # broadcast the odom -> base_link transform
    odom_quat = tf.transformations.quaternion_from_euler(
        0, 0, self.bot.bot_deg * 3.14 / 180.0)
    self.odom_broadcaster.sendTransform((self.bot.botx, self.bot.boty, 0.),
                                        odom_quat, t2, "base_link", "odom")

    odom = Odometry()
    odom.header.stamp = t2
    odom.header.frame_id = "odom"
    # set the position
    odom.pose.pose = Pose(Point(self.bot.botx, self.bot.boty, 0.),
                          Quaternion(*odom_quat))
    # set the velocity
    odom.child_frame_id = "base_link"
    odom.twist.twist = Twist(Vector3(self.vx, 0, 0),
                             Vector3(0, 0, gz_dps * 3.14 / 180.0))

    # publish the message
    self.odom_pub.publish(odom)
    self.prev_time = t2
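# The comment above asks for a push/pop moving average over an np array; one
# hedged way to do it (a sketch; the window length is an arbitrary choice):
import numpy as np

class VelocityAverager(object):
    def __init__(self, window=5):
        self.d = np.zeros(window)  # recent distance increments (m)
        self.t = np.zeros(window)  # matching time increments (s)

    def update(self, dmeters, dt):
        # push the newest sample and drop the oldest
        self.d = np.roll(self.d, 1)
        self.d[0] = dmeters
        self.t = np.roll(self.t, 1)
        self.t[0] = dt
        total_t = self.t.sum()
        return self.d.sum() / total_t if total_t > 0 else 0.0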
def callback(data):
    # Get the robot's current position. We want to publish people detections
    # relative to /base_footprint
    index = [i for i, x in enumerate(data.name) if 'vizzy::base_footprint' in x]

    robot_pose = Pose()
    for i in index:
        robot_pose.position.x = float(data.pose[i].position.x)
        robot_pose.position.y = float(data.pose[i].position.y)
        robot_pose.position.z = float(data.pose[i].position.z)
        robot_pose.orientation.x = float(data.pose[i].orientation.x)
        robot_pose.orientation.y = float(data.pose[i].orientation.y)
        robot_pose.orientation.z = float(data.pose[i].orientation.z)
        robot_pose.orientation.w = float(data.pose[i].orientation.w)

    map_to_robot_tf = pm.fromMsg(robot_pose)

    # Get people's positions
    idx = [i for i, x in enumerate(data.name)
           if ('pessoa' in x and 'base' in x) or ('person' in x and 'link' in x)]

    # people = PoseArray()
    # people.header.frame_id = "base_footprint"
    # people.header.stamp = rospy.Time.now()
    people = TrackedPersonsList()
    people.header.frame_id = "base_footprint"
    people.header.stamp = rospy.Time.now()

    # Publish detections
    for i in idx:
        person = PersonTracker()
        person_pose = pm.fromMsg(data.pose[i])
        rot = person_pose.M
        pos = person_pose.p
        [R, P, Y] = rot.GetRPY()
        Y -= math.pi / 2.0
        if Y < 0:
            Y = Y + 2.0 * math.pi
        rotated = PyKDL.Frame(PyKDL.Rotation.RPY(R, P, Y),
                              PyKDL.Vector(pos.x(), pos.y(), pos.z()))
        person.body_pose = pm.toMsg(map_to_robot_tf.Inverse() * rotated)
        # Add height to simulate that the face was detected
        person.body_pose.position.z += 1.7

        # If people are too far away or behind the robot, we can't detect them
        if (math.sqrt(person.body_pose.position.x ** 2 +
                      person.body_pose.position.y ** 2) > 4.0) or \
           (person.body_pose.position.x < 0):
            continue

        people.personList.append(person)

    message_pub.publish(people)