Example #1
File: buoy_2d.py Project: jpanikulam/Sub8
    def request_buoy3d(self, srv):
        print "Requesting 3d pose"

        if (len(self.observations) > 5) and self.multi_obs is not None:
            estimated_pose = self.multi_obs.multilaterate(self.observations, self.pose_pairs)
            self.rviz.draw_sphere(estimated_pose, color=(0.2, 0.8, 0.0, 1.0), scaling=(0.5, 0.5, 0.5), frame='/map')
            resp = VisionRequestResponse(
                pose=PoseStamped(
                    header=Header(stamp=self.last_image_time, frame_id='/map'),
                    pose=Pose(
                        position=Point(*estimated_pose)
                    )
                ),
                found=True
            )
        else:
            if len(self.observations) <= 5:
                rospy.logerr("Did not attempt search because we did not have enough observations")
            else:
                rospy.logerr("Did not attempt search because buoys_2d was not fully initialized")

            resp = VisionRequestResponse(
                pose=PoseStamped(
                    header=Header(stamp=self.last_image_time, frame_id='/map'),
                ),
                found=False
            )
        return resp
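
For context, a handler like request_buoy3d would be advertised as a ROS service so missions can call it. The snippet below is a hedged sketch, not code from the project: it assumes the service definition is sub8_msgs.srv.VisionRequest (the pair that VisionRequestResponse belongs to), that the class above is instantiated as finder, and it uses a hypothetical service name '/vision/buoy/pose'.

import rospy
from sub8_msgs.srv import VisionRequest  # assumed location of the service definition

def advertise_buoy_service(finder):
    # rospy invokes finder.request_buoy3d with the incoming request and
    # expects it to return a VisionRequestResponse.
    return rospy.Service('/vision/buoy/pose', VisionRequest, finder.request_buoy3d)
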
Example #2
    def request_buoy3d(self, srv):
        print "Requesting 3d pose"
        buoy = self.buoys[srv.target_name]
        if (len(buoy.observations) > 5) and self.multi_obs is not None:
            estimated_pose = self.multi_obs.lst_sqr_intersection(buoy.observations, buoy.pose_pairs)

            rospy.loginfo("===================================")
            rospy.loginfo("BUOY: est pose: {}".format(estimated_pose))
            rospy.loginfo("===================================")

            self.rviz.draw_sphere(estimated_pose, color=buoy.draw_colors,
                scaling=(0.5, 0.5, 0.5), frame='/map', _id=buoy.visual_id)

            resp = VisionRequestResponse(
                pose=PoseStamped(
                    header=Header(stamp=self.last_image_time, frame_id='/map'),
                    pose=Pose(
                        position=Point(*estimated_pose)
                    )
                ),
                found=True
            )
        else:
            if len(buoy.observations) <= 5:
                rospy.logerr("Did not attempt search because we did not have enough observations ({})".format(len(buoy.observations)))
            else:
                rospy.logerr("Did not attempt search because buoys_2d was not fully initialized")

            resp = VisionRequestResponse(
                pose=PoseStamped(
                    header=Header(stamp=self.last_image_time, frame_id='/map'),
                ),
                found=False
            )
        return resp
Example #3
 def cb_3d(self, req):
     res = VisionRequestResponse()
     if self.last2d is None or not self.enabled:
         res.found = False
     else:
         res.pose.header.frame_id = self.cam.tfFrame()
         res.pose.header.stamp = self.last_found_time
         res.pose.pose.position = numpy_to_point(self.last3d[0])
         res.pose.pose.orientation = numpy_to_quaternion(self.last3d[1])
         res.found = True
     return res
Example #4
 def request_buoy3d(self, srv):
     '''
     Callback for 3D vision request. Uses recent observations of buoy
     specified in target_name to attempt a least-squares position estimate.
     As buoys are spheres, orientation is meaningless.
     '''
     if srv.target_name not in self.buoys or not self.enabled:
         return VisionRequestResponse(found=False)
     buoy = self.buoys[srv.target_name]
     if buoy.est is None:
         return VisionRequestResponse(found=False)
     return VisionRequestResponse(pose=PoseStamped(
         header=Header(stamp=self.last_image_time, frame_id='/map'),
         pose=Pose(position=Point(*buoy.est))),
                                  found=True)
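
On the mission side, a response like this would be obtained through a service proxy. A minimal sketch under the same assumptions (service type sub8_msgs.srv.VisionRequest, hypothetical service name '/vision/buoys/pose'); only the target_name, found, and pose fields that appear in the examples above are relied on:

import rospy
from sub8_msgs.srv import VisionRequest  # assumed location of the service definition

def lookup_buoy(target_name):
    # Hypothetical service name; the real one depends on how the node advertises it.
    rospy.wait_for_service('/vision/buoys/pose')
    query = rospy.ServiceProxy('/vision/buoys/pose', VisionRequest)
    resp = query(target_name=target_name)
    return resp.pose if resp.found else None  # PoseStamped in the /map frame, or None
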
Example #5
 def request_buoy(self, srv):
     '''
     Callback for 3D vision request. Uses recent observations of target
     board specified in target_name to attempt a least-squares position
     estimate. The board's orientation is ignored.
     '''
     if not self.enabled:
         return VisionRequestResponse(found=False)
     # buoy = self.buoys[srv.target_name]
     if self.est is None:
         return VisionRequestResponse(found=False)
     return VisionRequestResponse(pose=PoseStamped(
         header=Header(stamp=self.last_image_time, frame_id='/map'),
         pose=Pose(position=Point(*self.est))),
                                  found=True)
Example #6
 def _vision_cb_3D(self, req):
     res = VisionRequestResponse()
     if self.last_found_time_3D is None or self.image_sub.last_image_time is None:
         res.found = False
         return res
     dt = (self.image_sub.last_image_time - self.last_found_time_3D).to_sec()
     if dt < 0 or dt > self.timeout_seconds:
         res.found = False
     elif (self.last3d is None or not self.enabled):
         res.found = False
     else:
         res.pose.header.frame_id = "/map"
         res.pose.header.stamp = self.last_found_time_3D
         res.pose.pose.position = numpy_to_point(self.last3d[0])
         res.pose.pose.orientation = numpy_to_quaternion(self.last3d[1])
         res.found = True
     return res
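
numpy_to_point and numpy_to_quaternion are project helpers that convert numpy arrays into geometry_msgs types. A standalone sketch of equivalent conversions (an assumption about their behavior, not the project's own implementation):

from geometry_msgs.msg import Point, Quaternion

def numpy_to_point(arr):
    # arr is a length-3 array-like: (x, y, z)
    return Point(x=float(arr[0]), y=float(arr[1]), z=float(arr[2]))

def numpy_to_quaternion(arr):
    # arr is a length-4 array-like in (x, y, z, w) order
    return Quaternion(x=float(arr[0]), y=float(arr[1]), z=float(arr[2]), w=float(arr[3]))
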
Example #7
 def request_board3d(self, srv):
     '''
     Callback for 3D vision request. Uses recent observations of target
     board specified in target_name to attempt a least-squares position
     estimate. The board's orientation is ignored.
     '''
     if not self.enabled:
       print("REEEE")  
       return VisionRequestResponse(found=False)
     #buoy = self.buoys[srv.target_name]
     if self.est is None:
         self.print_info("NO ESTIMATE!")
         return VisionRequestResponse(found=False)
     # NOTE: returns normal vec encoded into a quaternion message (so not actually a quaternion)
     return VisionRequestResponse(
         pose=PoseStamped(
             header=Header(stamp=rospy.Time.now(), frame_id='/map'),
             pose=Pose(position=Point(*((self.est + self.est1 + self.est2) / 3)),
                       orientation=Quaternion(x=self.plane[0], y=self.plane[1], z=self.plane[2]))),
         found=True)
Example #8
def handle_fake_perception(extra, target_object):
    '''
    Calls the GetModelState service from Gazebo to get the real-time position of the targeted model
    and provides this information to the mission.
    @param extra The target_name passed through some missions. Other missions do not pass a
    target_name, hence the label 'extra'.
    @param target_object The model name of the object targeted by the mission.
    Missions that do not pass a target_name must use this.
    '''

    now = rospy.get_rostime()
    k = np.uint32(0)
    if extra != '':
        target_object = extra
    if target_object == '':
        fprint("NO TARGET")
        sys.exit(0)
    model = get_position(target_object)
    # Library of offsets. Models must be manually offset because Gazebo coordinates != the center of the model.
    centlib = {
        'start_gate': Point(1.5, 0, 0),
        'nav_gate': Point(1.15, 0, 0),
        'orange_rectangle': Point(0, 0, 2)
    }
    if target_object in centlib:
        offset = centlib[target_object]
    else:
        offset = Point(0, 0, 0)
    pose_stamp = PoseStamped(
        header=Header(seq=k, stamp=now, frame_id="/map"),
        # Offset our pose by the starting position of the sub relative to the world in Gazebo.
        pose=Pose(position=Point(model.pose.position.x - 13 + offset.x,
                                 model.pose.position.y - 24 + offset.y,
                                 model.pose.position.z + offset.z),
                  orientation=model.pose.orientation))
    covariance_diagonal = Vector3(0, 0, 0)
    found = True
    resp2 = VisionRequestResponse(pose_stamp, covariance_diagonal, found)

    return resp2
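
get_position is not defined in this snippet; a plausible sketch, assuming it simply wraps Gazebo's standard /gazebo/get_model_state service (whose response carries the pose field used in the offset math above):

import rospy
from gazebo_msgs.srv import GetModelState

def get_position(model_name):
    # Ask Gazebo for the current state of the named model, relative to the world frame.
    rospy.wait_for_service('/gazebo/get_model_state')
    get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
    return get_model_state(model_name=model_name, relative_entity_name='world')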