Пример #1
0
 def obstacle(self, msg):
     """Publish the current obstacle pose on ``~obstacle_pose``.

     Sets ``collision`` from the (placeholder) condition ``A`` and
     ``angle`` from ``self.obstacle_angle``.
     """
     # NOTE(review): ``A`` is an undefined placeholder (see the comment
     # below) -- this handler cannot run until it is implemented.
     # A = there is object between self.obstacle_angle & self.target_angle
     pose = VehiclePose()
     pose.collision = True if A else False
     pose.angle = self.obstacle_angle
     self.pub_obstacle_pose.publish(pose)
Пример #2
0
    def pubPose(self, args=None):
        """Publish the current vehicle pose estimate and advance ``rho``.

        Builds a VehiclePose from the stored ``self.rho`` (theta and psi
        are fixed at 0.0), stamps it, publishes it on ``self.pose_pub``,
        then increments ``self.rho`` by 0.1 for the next call.

        Args:
            args: unused; present so the method can serve as a rospy
                timer callback (timers pass an event argument).
        """
        pose_msg_out = VehiclePose()

        # Stamp the message so consumers can order / reject stale poses
        # (matches the stamped variant of this publisher elsewhere).
        pose_msg_out.header.stamp = rospy.Time.now()
        pose_msg_out.rho.data = self.rho
        pose_msg_out.theta.data = 0.0
        pose_msg_out.psi.data = 0.0

        self.rho = self.rho + 0.1

        self.pose_pub.publish(pose_msg_out)
Пример #3
0
    def pubPose(self, args=None):
        """Publish a stamped VehiclePose built from ``self.rho``.

        theta and psi are always reported as 0.0; ``self.rho`` grows by
        0.1 after each publication.

        Args:
            args: unused timer-callback placeholder.
        """
        msg = VehiclePose()
        msg.header.stamp = rospy.Time.now()

        msg.rho.data = self.rho
        msg.theta.data = 0.0
        msg.psi.data = 0.0

        # Advance rho for the next cycle.
        self.rho += 0.1

        self.pose_pub.publish(msg)
Пример #4
0
 def controllerInitialization(self):
     """Reset all controller state to its idle defaults.

     Seeds the cached pose message and reference time with "now" and
     zeroes every velocity, error and integrator term.
     """
     # Cached last pose and the time it was recorded.
     self.vehicle_pose_msg_temp = VehiclePose()
     self.vehicle_pose_msg_temp.header.stamp = rospy.Time.now()
     self.time_temp = rospy.Time.now()

     # Controller state: detection flag, velocities, PI terms.
     self.detection = False
     self.v_rel = 0
     self.v = 0
     self.v_error_temp = 0
     self.I = 0
     self.v_follower = 0
     #self.rho_temp = 0
     self.omega = 0
Пример #5
0
 def processCorners(self, vehicle_corners_msg):
     """Relay only the detection flag: publish a zeroed pose.

     The incoming message is dropped entirely when the lock is
     already held.
     """
     if not self.lock.testandset():
         return
     relay = VehiclePose()
     relay.rho.data = 0.0
     relay.theta.data = 0.0
     relay.psi.data = 0.0
     # Pass the upstream detection flag through untouched.
     relay.detection.data = vehicle_corners_msg.detection.data
     self.pub_pose.publish(relay)
     self.lock.unlock()
Пример #6
0
    def __init__(self):
        """Initialize node state, pose publishers and the shutdown hook."""
        self.node_name = rospy.get_name()

        # Cached state.
        self.last_vehicle_pose = VehiclePose()
        self.car_cmd_msg = Twist2DStamped()
        self.target_angle = Float32()
        self.obstacle_angle = Float32()  # smallest angle one
        self.detect_target = Bool()

        #-----Publication-----
        # Both topics carry VehiclePose: the obstacle/target callbacks
        # publish VehiclePose messages on them, so advertising them as
        # Twist2DStamped was a message-type mismatch.
        self.pub_target_pose = rospy.Publisher("~target_pose", VehiclePose, queue_size=1)
        self.pub_obstacle_pose = rospy.Publisher("~obstacle_pose", VehiclePose, queue_size=1)
        #-----Subscriptions-----

        # safe shutdown
        rospy.on_shutdown(self.custom_shutdown)
    def cbimage(self, image_msg):
        """Decode a compressed camera image and cache it on the node.

        Stores the decoded OpenCV image in ``self.camera_IMG`` and the
        re-encoded bgr8 Image message in ``self.camera_msg``.  On a
        bridge/decode failure the frame is skipped.
        """
        try:
            # np.frombuffer replaces the deprecated np.fromstring.
            np_arr = np.frombuffer(image_msg.data, np.uint8)
            image_cv = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            image_msg_out = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
        except CvBridgeError as e:
            # Must bail out here: image_msg_out is undefined after a
            # failure, so the code below would raise NameError.
            print(e)
            return
        # Load message
        cv_img = self.bridge.imgmsg_to_cv2(image_msg_out, desired_encoding="passthrough")
        self.camera_IMG = cv_img
        self.camera_msg = image_msg_out
Пример #8
0
    def __init__(self):
        """Set up the follower controller: state, tunables, pubs/subs, hooks."""
        self.node_name = rospy.get_name()
        self.last_vehicle_pose = VehiclePose()

        # Start from an all-zero command.  Note: last_car_cmd_msg aliases
        # car_cmd_msg (same object), matching the original design.
        self.car_cmd_msg = Twist2DStamped()
        self.car_cmd_msg.omega = 0.0
        self.car_cmd_msg.v = 0.0
        self.last_car_cmd_msg = self.car_cmd_msg

        self.last_omega = 0
        self.last_v = 0

        # Tunable parameters (param server, with defaults).
        self.dist_ref = self.setup_parameter("~dist_ref", 0.15)
        self.head_ref = self.setup_parameter("~head_ref", 0.0)
        self.k_follow = self.setup_parameter("~k_follow", 1.0)  # linear-velocity gain
        self.k_heading = self.setup_parameter("~k_heading", 1.0)  # P gain for theta

        self.head_thres = self.setup_parameter("~head_thres", math.pi / 4)  # max desired heading
        self.max_speed = self.setup_parameter("~max_speed", 0.4)
        self.max_heading = self.setup_parameter("~max_heading", 0.2)
        self.deadspace_speed = self.setup_parameter("~deadspace_speed", 0.05)
        self.deadspace_heading = self.setup_parameter("~deadspace_heading", 0.2)
        self.alpha = self.setup_parameter("~alpha", 1.0)
        self.alpha_psi = self.setup_parameter("~alpha_psi", 0.0)

        # AprilTag-control parameters.
        self.delay_go = 0.2
        self.delay_pause = 0.2
        self.stop_pause = True
        self.target = np.array([0.4, 0.0])
        self.lost_bumper = False

        # Publishers.
        self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
        self.pub_april = rospy.Publisher("~apriltag_switch", BoolStamped, queue_size=1)

        # Subscriptions and periodic parameter refresh.
        #self.sub_target_pose_bumper = rospy.Subscriber("~target_pose", VehiclePose, self.cb_target_pose_bumper, queue_size=1)
        self.params_update = rospy.Timer(rospy.Duration.from_sec(1.0), self.update_params_event)
        self.sub_target_pose_april = rospy.Subscriber("~target_pose_april", VehiclePose, self.cb_target_pose_april, queue_size=1)

        # Safe shutdown, then announce readiness.
        rospy.on_shutdown(self.custom_shutdown)
        rospy.loginfo("[%s] Initialized " % (rospy.get_name()))
Пример #9
0
    def processCorners(self, vehicle_corners_msg):
        """Estimate the leader's relative pose from detected circle-grid
        corners (via solvePnP) and publish it as a VehiclePose.

        The message is dropped when the lock is already held.  The
        processing time is published on ``self.pub_time_elapsed`` for every
        processed message.  Fixes vs. the previous revision: the lock is
        released only when it was actually acquired (it used to be
        unlocked unconditionally) and is now released even if an
        exception escapes; debug prints/log spam removed; the loop
        variable no longer shadows the Point32 message class.
        """
        if not self.lock.testandset():
            return
        try:
            start = rospy.Time.now()
            self.calcCirclePattern(vehicle_corners_msg.H,
                                   vehicle_corners_msg.W)
            # Corner pixel coordinates as an Nx2 array.
            points = np.array([[corner.x, corner.y]
                               for corner in vehicle_corners_msg.corners])
            # Approximate pinhole intrinsics for a 640x480 image, taking
            # the focal length equal to the image width.
            size = [480, 640]
            focal_length = size[1]
            center = (size[1] / 2, size[0] / 2)
            camera_matrix = np.array([[focal_length, 0, center[0]],
                                      [0, focal_length, center[1]], [0, 0, 1]],
                                     dtype="double")
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(self.circlepattern, points,
                                                camera_matrix,
                                                self.pcm.distortionCoeffs())
            if success:
                # Validate the PnP solution via mean reprojection error.
                points_reproj, _ = cv2.projectPoints(
                    self.circlepattern, rotation_vector, translation_vector,
                    camera_matrix, self.pcm.distortionCoeffs())
                error = 0
                for i in range(0, len(points_reproj)):
                    error += cv2.norm(points[i], points_reproj[i, 0],
                                      cv2.NORM_L2)
                mean_reproj_error = error / len(points_reproj)

                if mean_reproj_error < self.max_reproj_pixelerror_pose_estimation:
                    # Invert the transform: camera pose in the pattern frame.
                    (R, jac) = cv2.Rodrigues(rotation_vector)
                    R_inv = np.transpose(R)
                    translation_vector = -np.dot(R_inv, translation_vector)
                    pose_msg_out = VehiclePose()
                    pose_msg_out.header.stamp = rospy.Time.now()
                    # Range in the ground plane (x/z components).
                    pose_msg_out.rho.data = sqrt(translation_vector[2]**2 +
                                                 translation_vector[0]**2)
                    pose_msg_out.psi.data = np.arctan2(
                        -R_inv[2, 0], sqrt(R_inv[2, 1]**2 + R_inv[2, 2]**2))
                    pose_msg_out.detection.data = vehicle_corners_msg.detection.data
                    # Rotate the planar translation by psi before taking
                    # the bearing angle theta.
                    R2 = np.array([[
                        cos(pose_msg_out.psi.data), -sin(pose_msg_out.psi.data)
                    ], [
                        sin(pose_msg_out.psi.data),
                        cos(pose_msg_out.psi.data)
                    ]])
                    translation_vector = - \
                        np.array([translation_vector[2],
                                  translation_vector[0]])
                    translation_vector = np.dot(np.transpose(R2),
                                                translation_vector)
                    pose_msg_out.theta.data = np.arctan2(
                        translation_vector[1], translation_vector[0])
                    self.pub_pose.publish(pose_msg_out)
                else:
                    rospy.loginfo(
                        "[%s] Pose estimation failed, too high reprojection error."
                        % (self.node_name))
            else:
                rospy.loginfo("[%s] Pose estimation failed." %
                              (self.node_name))

            elapsed_time = (rospy.Time.now() - start).to_sec()
            self.pub_time_elapsed.publish(elapsed_time)
        finally:
            # Release only the lock we actually acquired, even on error.
            self.lock.unlock()
        return
Пример #10
0
 def callback(self, msg):
     """Convert the first AprilTag detection in *msg* into a pose in the
     vehicle frame and publish it as a VehiclePose.

     When no tags are detected, a pose with ``detection = False`` (all
     other fields at their defaults) is published instead.
     """
     # Load tag detections message
     print("Here")
     target_pose_out = VehiclePose()
     target_pose_out.detection = False
     # No detections: report "not detected" and stop.
     if len(msg.detections) == 0:
         self.pub_pose.publish(target_pose_out)
         return
     target_pose_out.detection = True
     # Only the first detection is used.
     detection = msg.detections[0]
     # Camera mounting pose relative to the vehicle origin (ROS params).
     camera_x = rospy.get_param("~camera_x")
     camera_y = rospy.get_param("~camera_y")
     camera_z = rospy.get_param("~camera_z")
     camera_theta = rospy.get_param("~camera_theta")
     scale = rospy.get_param("~scale")
     # Load translation (scaled to metric units via ~scale).
     x = detection.transform.translation.x
     y = detection.transform.translation.y
     z = detection.transform.translation.z
     # translation tags(t) w/ camera(c) expressed in camera frame (Fc)
     t_tc_Fc = k.Vector(x, y, z)
     t_tc_Fc = t_tc_Fc * scale
     # Load rotation as a quaternion (vector part e, scalar part w).
     x = detection.transform.rotation.x
     y = detection.transform.rotation.y
     z = detection.transform.rotation.z
     w = detection.transform.rotation.w
     e = k.Vector(x, y, z)
     Q_Ftag_Fold = k.Quaternion(e, w)
     # New tag orientation reference (zero when facing camera) w/ to old tag ref used by the lib
     C_Ft_Ftag = k.RotationMatrix(
         np.matrix([[0, 0, -1], [-1, 0, 0], [0, 1, 0]]))
     Q_Ft_Ftag = C_Ft_Ftag.toQuaternion()
     # Rotation of old ref frame used by the lib w/ to camera frame
     C_Fold_Fc = k.RotationMatrix(
         np.matrix([[0, -1, 0], [0, 0, -1], [1, 0, 0]]))
     Q_Fold_Fc = C_Fold_Fc.toQuaternion()
     # Camera localization
     # translation of camera w/ vehicle origin in vehicle frame
     t_cv_Fv = k.Vector(camera_x, camera_y, camera_z)
     C_Fc_Fv = k.euler2RotationMatrix(
         0, camera_theta, 0)  # Rotation   of camera frame w/ vehicle frame
     Q_Fc_Fv = C_Fc_Fv.toQuaternion()
     # Compute tag orientation in vehicle frame
     Q_Ft_Fv = Q_Fc_Fv * Q_Fold_Fc * Q_Ftag_Fold * Q_Ft_Ftag
     # Compute position of tag in vehicle frame expressed in vehicle frame
     # NOTE(review): unary minus is assumed to be the k library's inverse
     # for RotationMatrix (a plain element-wise negation would NOT be the
     # inverse rotation) -- confirm against the k library.
     C_Fv_Fc = -C_Fc_Fv  # inverse transform
     t_tc_Fv = C_Fv_Fc * t_tc_Fc
     t_tv_Fv = t_tc_Fv + t_cv_Fv
     # Overwrite transformed value back into the detection message.
     detection.transform.translation.x = t_tv_Fv.x
     detection.transform.translation.y = t_tv_Fv.y
     detection.transform.translation.z = t_tv_Fv.z
     detection.transform.rotation.x = Q_Ft_Fv.e.x
     detection.transform.rotation.y = Q_Ft_Fv.e.y
     detection.transform.rotation.z = Q_Ft_Fv.e.z
     detection.transform.rotation.w = Q_Ft_Fv.n
     A_Ft_Fv = Q_Ft_Fv.toAngleAxis()
     # Publish Message: polar coordinates of the tag in the vehicle frame.
     target_pose_out.x = detection.transform.translation.x
     target_pose_out.y = detection.transform.translation.y
     target_pose_out.rho = np.linalg.norm(
         [target_pose_out.x, target_pose_out.y])
     target_pose_out.theta = np.arctan2(target_pose_out.y,
                                        target_pose_out.x)
     # NOTE(review): psi ~= 2*qz is a small-angle yaw approximation from
     # the quaternion z component -- only valid for small psi.
     target_pose_out.psi = 2 * detection.transform.rotation.z
     #print ("(x,y) = ", target_pose_out.x, target_pose_out.y)
     #print ("(rho, theta, psi)= ", target_pose_out.rho, target_pose_out.theta, target_pose_out.psi)
     self.pub_pose.publish(target_pose_out)
Пример #11
0
 def target(self):
     """Publish the current target angle on ``~target_pose``."""
     msg = VehiclePose()
     msg.angle = self.target_angle
     self.pub_target_pose.publish(msg)