def __init__(self):
        self.curr_pose = PoseStamped()
        self.cam_img = PoseStamped()
        self.des_pose = PoseStamped()
        self.destination_pose = PoseStamped()
        self.pixel_img = Image()
        self.camera_image = Image()
        self.truck_target_x = 0
        self.truck_target_y = 0
        self.truck_target_z = 0
        self.image_target = []
        self.depth = Image()
        self.KP = 0.005
        self.counter = 0
        self.destination_x = 0
        self.destination_y = 0
        self.destination_z = 0
        self.destination_x_previous = 0
        self.destination_y_previous = 0
        self.destination_z_previous = 0
        self.hover_x = 0
        self.hover_y = 0
        self.hover_z = 0
        self.detection_count = 0
        self.ray_target = []
        self.rel_coordinates = []
        self.flag = "False"
        self.is_ready_to_fly = False
        self.hover_loc = [self.hover_x, self.hover_y, self.hover_z, 0, 0, 0, 0]  # hover 3 m above this location
        self.suv_search_loc = [10, -62, 10]
        self.mode = "FLYTODESTINATION"
        self.phase = "SEARCH"
        self.dist_threshold = 0.4
        self.waypointIndex = 0
        self.sim_ctr = 1
        self.arm = False
        self.range = 0

        # define ros subscribers and publishers
        rospy.init_node('OffboardControl', anonymous=True)
        self.pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback=self.pose_callback)
        self.state_sub = rospy.Subscriber('/mavros/state', State, callback=self.state_callback)
        self.vel_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size=10)
        #self.attach = rospy.Publisher('/attach', String, queue_size=10)
        camera_info = rospy.wait_for_message("/uav_camera_down/camera_info", CameraInfo)
        camera_info2 = rospy.wait_for_message("/uav_camera_front/rgb/camera_info",CameraInfo)
        self.pinhole_camera_model = PinholeCameraModel()
        self.pinhole_camera_model_rgb = PinholeCameraModel()
        self.pinhole_camera_model.fromCameraInfo(camera_info)
        self.pinhole_camera_model_rgb.fromCameraInfo(camera_info2)  # front RGB camera model
        #rospy.Subscriber('/uav_camera/image_raw_down',Image,callback=self.pixel_image, callback_args = '/uav_camera/image_raw_down')
        rospy.Subscriber('/uav_camera_down/image_raw',Image,callback=self.pixel_image)
        rospy.Subscriber('/camera/depth/image_raw',Image,callback=self.depth_image)
        rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, callback=self.yolo)
        self.decision = rospy.Subscriber('/data', String, callback=self.set_mode)
        self.sonar = rospy.Subscriber('/sonar', Range, callback=self.range_callback)
        self.controller()
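A minimal sketch of what the yolo callback wired up above might do with the down-facing pinhole model, assuming the standard darknet_ros BoundingBoxes layout; the body itself is illustrative, not taken from the original node:

    def yolo(self, data):
        # hedged sketch: back-project the first detection's pixel centre into a
        # ray in the down-facing camera's optical frame
        if not data.bounding_boxes:
            return
        box = data.bounding_boxes[0]
        u = (box.xmin + box.xmax) / 2.0
        v = (box.ymin + box.ymax) / 2.0
        ray = self.pinhole_camera_model.projectPixelTo3dRay((u, v))
        self.ray_target = [c / ray[2] for c in ray]  # rescale so z == 1
        self.detection_count += 1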
Example #2
 def cb_camera_info(self, msg):
     # unsubscribe from camera_info
     self.loginfo(
         "Camera info message received. Unsubscribing from camera_info topic."
     )
     # noinspection PyBroadException
     try:
         self.sub_camera_info.shutdown()
     except BaseException:
         pass
     # ---
     H, W = msg.height, msg.width
     # create new camera info
     self.camera_model = PinholeCameraModel()
     self.camera_model.fromCameraInfo(msg)
     # find optimal rectified pinhole camera
     with self.profiler("/cb/camera_info/get_optimal_new_camera_matrix"):
         rect_camera_K, _ = cv2.getOptimalNewCameraMatrix(
             self.camera_model.K, self.camera_model.D, (W, H),
             self.alpha.value)
     # create rectification map
     with self.profiler("/cb/camera_info/init_undistort_rectify_map"):
         self.mapx, self.mapy = cv2.initUndistortRectifyMap(
             self.camera_model.K, self.camera_model.D, None, rect_camera_K,
             (W, H), cv2.CV_32FC1)
     # pack rectified camera info into a CameraInfo message
     self.rect_camera_info = CameraInfo(
         width=W,
         height=H,
         K=rect_camera_K.flatten().tolist(),
         R=np.eye(3).flatten().tolist(),
         P=np.zeros((3, 4)).flatten().tolist(),
     )
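A short companion sketch showing how the maps computed above are typically applied to incoming frames; the callback name, the CvBridge attribute and the publisher are assumptions, not part of the original node:

    def cb_image(self, msg):
        # hedged sketch: undistort/rectify each frame with the precomputed maps
        raw = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        rectified = cv2.remap(raw, self.mapx, self.mapy, cv2.INTER_LINEAR)
        out = self.bridge.cv2_to_imgmsg(rectified, encoding="bgr8")
        out.header = msg.header
        self.pub_rect.publish(out)  # hypothetical sensor_msgs/Image publisher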
Example #3
    def centroid_to_pose(self, rect, moment):
        """
        Publish the region of interest as a geometry_msgs/Pose message from
        the ROI or the center of mass coordinates
        @param rect - sensor_msgs/RegionOfInterest
        @param moment - object's center of mass
        """
        if not moment:
            moment = (int(rect[0] + rect[2] / 2), int(rect[1] + rect[3] / 2))
        u, v = int(moment[0]), int(moment[1])

        ps = Pose()
        ps.orientation.x = 0.0
        ps.orientation.y = 0.0
        ps.orientation.z = 0.0
        ps.orientation.w = 1.0

        cam = PinholeCameraModel()
        cam.fromCameraInfo(self.cam_info)
        (x, y, z) = cam.projectPixelTo3dRay((u, v))

        ps.position.x = x
        ps.position.y = y
        ps.position.z = z

        return ps
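A hedged usage sketch for the method above; the stamped wrapper, the frame id source and the publisher name are illustrative only:

    def publish_centroid(self, rect, moment=None):
        # hypothetical caller: stamp the ray-pose and publish it
        ps = PoseStamped()  # geometry_msgs.msg.PoseStamped
        ps.header.stamp = rospy.Time.now()
        ps.header.frame_id = self.cam_info.header.frame_id
        ps.pose = self.centroid_to_pose(rect, moment)
        self.pose_pub.publish(ps)  # assumed geometry_msgs/PoseStamped publisher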
Example #4
    def __init__(self):
        """
        Initialize Color Tracking ROS interface.
        """
        rospy.init_node('color_tracker')
        self.tracker = ColorTracker()
        self.cv_image = self.processed_image = None  # the latest image from the camera
        self.boxes = []
        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV
        self.cameraModel = PinholeCameraModel()
        self.tf = TransformListener(cache_time=rospy.Duration.from_sec(20))

        # parameters ...
        self._gui = bool(rospy.get_param('~gui', default=False)) # set to _gui:=true for debug display
        self._rate = float(rospy.get_param('~rate', default=50.0)) # processing rate
        self._par = float(rospy.get_param('~plate_area', default=0.0338709))

        # publishers ...
        self.debug_pub = rospy.Publisher("tracker/debug", PoseWithCovarianceStamped, queue_size=10)
        self.roomba_pub = rospy.Publisher("visible_roombas", RoombaList, queue_size = 10)

        rospy.loginfo("Initializing Color Tracker")
        if self._gui:
            cv2.namedWindow('preview_window')
            cv2.namedWindow('binary')

        # start listening ...
        rospy.Subscriber("/ardrone/bottom/image_raw", Image, self.process_image)
        rospy.Subscriber("/ardrone/bottom/camera_info", CameraInfo, self.on_camera_info)
Example #5
        def __init__(self,resize_width,resize_height):
            self.resize_width = resize_width
            self.resize_height = resize_height
            self.bridge = CvBridge()

            
            try:
                camera_info = rospy.wait_for_message('/usb_cam/camera_info', CameraInfo, timeout=5)
            except rospy.ROSException as e:
                print("Camera info topic not available")
                print(e)
                exit()
            waf = float(resize_width) / camera_info.width
            haf = float(resize_height) / camera_info.height
            camera_info.height = resize_height
            camera_info.width = resize_width

            # adjust the camera matrix
            K = camera_info.K
            camera_info.K = (K[0]*waf,         0.,  K[2]*waf,
                                    0.,  K[4]*haf,  K[5]*haf,
                                    0.,        0.,         1.)

            # adjust the projection matrix
            P = camera_info.P
            camera_info.P = (P[0]*waf,        0.,  P[2]*waf,  0.,
                                0.,  P[5]*haf,  P[6]*haf,  0.,
                                0.,        0.,        1.,  0.)

            self.camera_model = PinholeCameraModel()
            self.camera_model.fromCameraInfo(camera_info)
            print (camera_info)
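A brief follow-up sketch, assuming incoming frames are resized to the same dimensions so that pixel coordinates stay consistent with the adjusted intrinsics; the callback name is an assumption:

    def image_cb(self, msg):
        # hedged sketch: resize the frame to match the scaled camera model
        frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        frame = cv2.resize(frame, (self.resize_width, self.resize_height))
        # e.g. back-project the image centre with the adjusted model
        ray = self.camera_model.projectPixelTo3dRay(
            (self.resize_width / 2.0, self.resize_height / 2.0))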
Example #6
def dpx_to_distance(dx, dy, camera_name, current_ps, offset):
    print("The dx is " + str(dx) + " the dy is " + str(dy) + " and the camera name is " + camera_name)

    big_z_mappings = {'camera1': transform_broadcaster_mapping['camera1'][0][2] - current_ps.pose.position.z, # x-y plane
                      'camera2': transform_broadcaster_mapping['camera2'][0][1] - current_ps.pose.position.y, # x-z plane
                      'camera3': transform_broadcaster_mapping['camera3'][0][0] - current_ps.pose.position.x} # y-z plane

    #print("The frame_id for the current pose is " + current_ps.header.frame_id)
    camera_model = PinholeCameraModel()
    camera_model.fromCameraInfo(camera_info_mapping[camera_name])
    x, y, z = camera_model.projectPixelTo3dRay((dx, dy))
    #print " x : {} , y : {} , z : {}".format(x, y, z)
    x_center, y_center, z_center = camera_model.projectPixelTo3dRay((0, 0))
    big_z = abs(big_z_mappings[camera_name])
    #print("The big_z for " + camera_name + " is " + str(big_z))
    big_x = (x / z) * big_z  # use the law of similar triangles to solve
    big_y = (y / z) * big_z

    big_x_center = (x_center / z_center) * big_z
    big_y_center = (y_center / z_center) * big_z

    #print("The x_center  is " + str(x_center) + " the y_center  is " + str(y_center) + " and the z_center is " + str(
    #    z_center))
    #print(
    #"The x distance is " + str(big_x - big_x_center) + " the y distance is " + str(big_y - big_y_center) + " and the camera name is " + camera_name + "\n")
    if offset:
        return big_x - big_x_center, big_y - big_y_center
    else:
        return big_x, big_y
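A rough usage sketch for the helper above; the pixel deltas and the goal-pose handling are illustrative, not taken from the original code:

import copy

# hypothetical caller: turn pixel deltas into a corrected goal for camera1,
# which observes the x-y plane in the mapping above
# current_ps: a geometry_msgs/PoseStamped obtained elsewhere
dx_m, dy_m = dpx_to_distance(25, -10, 'camera1', current_ps, offset=True)
goal_ps = copy.deepcopy(current_ps)
goal_ps.pose.position.x += dx_m
goal_ps.pose.position.y += dy_m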
Example #7
    def __init__(self):

        # Publish an image topic and show view of image
        self.image_pub = rospy.Publisher("image_topic_2", Image, queue_size=10)
        cv2.namedWindow("Image window", 1)

        # Use CvBridge to convert from OpenCv to ros
        self.bridge = CvBridge()

        # Set up data member to get camera info
        self.cam_model = PinholeCameraModel()

        # Subscribe to the head_camera/rgb/image_raw camera topic
        self.image_sub = rospy.Subscriber("head_camera/rgb/image_raw", Image,
                                          self.get_coordinates)

        # Subscribe to the head_camera/depth_registered/camera_info
        #self.image_sub = rospy.Subscriber("head_camera/depth_registered/image_raw", Image, self.callback)

        # Set up transform listener
        self.tf_listener = TransformListener()

        self.cam_info_pub = rospy.Publisher("image_topic_3",
                                            CameraInfo,
                                            queue_size=10)
        self.cam_info_sub = rospy.Subscriber(
            "/head_camera/depth_registered/camera_info", CameraInfo,
            self.set_camera_info)
Example #8
    def __init__(self, camera_name, base_frame, table_height):
        """
        Initialize the instance

        :param camera_name: The camera name. One of (head_camera, right_hand_camera)
        :param base_frame: The frame for the robot base
        :param table_height: The table height with respect to base_frame
        """
        self.camera_name = camera_name
        self.base_frame = base_frame
        self.table_height = table_height

        self.image_queue = Queue.Queue()
        self.pinhole_camera_model = PinholeCameraModel()
        self.tf_listener = tf.TransformListener()

        camera_info_topic = "/io/internal_camera/{}/camera_info".format(
            camera_name)
        camera_info = rospy.wait_for_message(camera_info_topic, CameraInfo)

        self.pinhole_camera_model.fromCameraInfo(camera_info)

        cameras = intera_interface.Cameras()
        cameras.set_callback(camera_name,
                             self.__show_image_callback,
                             rectify_image=True)
Example #9
 def _enable(self):
     if self._camera_info is None:
         self._camera_info = self._image_sub.wait_for_camera_info()
         self.camera_model = PinholeCameraModel()
         self.camera_model.fromCameraInfo(self._camera_info)
     self._enabled = True
     rospy.loginfo("Enabled.")
Example #10
def FindDist(color):

    cont = checkIfColorIsShowed(color)

    if cont == []:
        rospy.sleep(0.5)

        print "None"
        return None
    cnt = cont[0]
    M = cv2.moments(cnt)
    center_x = int(M['m10'] / M['m00'])
    center_y = 400

    imgCntr = (400, 400)
    camInfo = rospy.wait_for_message("camera/camera_info", CameraInfo)
    camera = PinholeCameraModel()
    camera.fromCameraInfo(camInfo)
    cenRay = camera.projectPixelTo3dRay(imgCntr)
    pixRay = camera.projectPixelTo3dRay((center_x, center_y))

    angle = math.acos(
        prod(pixRay, cenRay) /
        (math.sqrt(prod(pixRay, pixRay)) * math.sqrt(prod(cenRay, cenRay))))

    dg = int(math.degrees(angle))

    if center_x >= imgCntr[0]:
        dg = 360 - dg

    LaserMessage = rospy.wait_for_message("scan", LaserScan)
    return retrunResult(LaserMessage, dg)
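This snippet relies on helpers (checkIfColorIsShowed, prod, retrunResult) that are not shown; a plausible sketch of the dot-product helper the angle computation appears to assume:

def prod(a, b):
    # plausible reading: plain dot product of two 3-vectors (camera rays)
    return sum(x * y for x, y in zip(a, b))

With that reading, the acos of the normalised dot product is the angle between the pixel ray and the optical axis, which the code then maps onto a LaserScan index.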
Example #11
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()
        self.active = True
        self.currentImg = None

        self.publish_duration = rospy.Duration.from_sec(1.0 / 2.0)
        self.last_stamp = rospy.Time.now()

        #Subscribers and publishers
        self.imagesub = rospy.Subscriber("~image",
                                         CompressedImage,
                                         self.find_marker,
                                         buff_size=921600,
                                         queue_size=1)
        self.movepub = rospy.Publisher("~car_cmd",
                                       Twist2DStamped,
                                       queue_size=10)
        self.imagepub = rospy.Publisher("~image", Image, queue_size=1)
        self.infosub = rospy.Subscriber("~camera_info",
                                        CameraInfo,
                                        self.get_camera_info,
                                        queue_size=1)
        self.camerainfo = PinholeCameraModel()

        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
Example #12
    def __init__(self, img_proc=None):
        super(LandmarkMethodROS,
              self).__init__(device_id_facedetection=rospy.get_param(
                  "~device_id_facedetection", default="cuda:0"))
        self.subject_tracker = FaceEncodingTracker() if rospy.get_param(
            "~use_face_encoding_tracker",
            default=True) else SequentialTracker()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()

        self.ros_tf_frame = rospy.get_param("~ros_tf_frame",
                                            "/kinect2_nonrotated_link")

        self.tf_broadcaster = TransformBroadcaster()
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")
        self.visualise_headpose = rospy.get_param("~visualise_headpose",
                                                  default=True)

        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            if img_proc is None:
                tqdm.write("Wait for camera message")
                cam_info = rospy.wait_for_message("/camera_info",
                                                  CameraInfo,
                                                  timeout=None)
                self.img_proc = PinholeCameraModel()
                # noinspection PyTypeChecker
                self.img_proc.fromCameraInfo(cam_info)
            else:
                self.img_proc = img_proc

            if np.array_equal(
                    self.img_proc.intrinsicMatrix().A,
                    np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is zero-matrix. Did you calibrate '
                    'the camera and link it to the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # multiple person images publication
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=3)
        # multiple person faces publication for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=3)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

        # have the subscriber the last thing that's run to avoid conflicts
        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.process_image,
                                          buff_size=2**24,
                                          queue_size=3)
Example #13
    def __init__(self):

        # defaults overwritten by param
        self.robot_name = "shamrock"
        self.rectified_input = False
        
        # read calibrations
        self.extrinsics_filename = (get_ros_package_path('duckietown') +
                               "/config/baseline/calibration/camera_extrinsic/" +
                               self.robot_name + ".yaml")
        if not os.path.isfile(self.extrinsics_filename):
            logger.warn("no robot specific extrinsic calibration, trying default")
            alternate_extrinsics_filename = (get_ros_package_path('duckietown') +
                                   "/config/baseline/calibration/camera_extrinsic/default.yaml")
            if not os.path.isfile(alternate_extrinsics_filename):
                logger.warn("can't find default either, something's wrong")
            else:
                self.H = self.load_homography(alternate_extrinsics_filename)
        else:
            self.H = self.load_homography(self.extrinsics_filename)
        self.H_inv = np.linalg.inv(self.H)
        
        intrinsics_filename = (get_ros_package_path('duckietown') + "/config/baseline/calibration/camera_intrinsics/" + self.robot_name + ".yaml")
        if not os.path.isfile(intrinsics_filename):
            logger.warn("no robot specific intrinsic calibration, trying default")
            intrinsics_filename = (get_ros_package_path('duckietown') +
                                   "/config/baseline/calibration/camera_intrinsics/default.yaml")
            if not os.path.isfile(intrinsics_filename):
                logger.error("can't find default either, something's wrong")

                
        self.ci_ = self.load_camera_info(intrinsics_filename)
        self.pcm_ = PinholeCameraModel()
        self.pcm_.fromCameraInfo(self.ci_)
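A hedged sketch of the ground-projection helper this calibration is typically loaded for, assuming the homography H maps ground-plane points to image pixels; the method name is illustrative:

    def pixel_to_ground(self, u, v):
        # hedged sketch: under that assumption, the inverse homography takes a
        # rectified pixel back onto the ground plane
        gp = np.dot(self.H_inv, np.array([u, v, 1.0]))
        return gp[0] / gp[2], gp[1] / gp[2]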
Example #14
 def __init__(self):
     cv2.namedWindow('AR_TRACKER')
     self.bridge = cv_bridge.CvBridge()
     self.cam_info_sub = rospy.Subscriber('camera/rgb/camera_info',
                                          CameraInfo, self.info_cb)
     # self.pose_sub = rospy.Subscriber('pose', Pose, self.pose_cb)
     # self.img_sub = rospy.Subscriber('camera/rgb/image_raw/compressed', CompressedImage, self.img_cb, queue_size = 1, buff_size=2**24)
     self.img_sub = rospy.Subscriber('usb_cam/image_raw/compressed',
                                     CompressedImage,
                                     self.img_cb,
                                     queue_size=1,
                                     buff_size=2**24)
     self.ar_sub = rospy.Subscriber('ar_pose_marker', AlvarMarkers,
                                    self.ar_cb)
     self.pub = rospy.Publisher('ar_location', String, queue_size=5)
     # self.control_pub = rospy.Publisher('ar_control', Bool, queue_size=5)
     # self.stop_linear = False
     # self.straight_ahead = False
     # self.init = [False, False]
     # self.initial_theta = None
     # self.theta = None
     # self.pos = False
     self.count = 0
     self.axis = np.float32([[0.1, 0, 0], [0, 0.1, 0], [0, 0, -0.1],
                             [0, 0, 0]]).reshape(-1, 3)
     self.objp = np.zeros((6 * 8, 3), np.float32)
     self.objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
     # self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
     self.K = None
     self.D = None
     self.markers = []
     self.cam_info = None
     self.pinhole = PinholeCameraModel()
Example #15
 def _cinfo_cb(self, msg):
     # create mapx and mapy
     H, W = msg.height, msg.width
     # create new camera info
     self.camera_model = PinholeCameraModel()
     self.camera_model.fromCameraInfo(msg)
     # find optimal rectified pinhole camera
     with self.profiler('/cb/camera_info/get_optimal_new_camera_matrix'):
         rect_K, _ = cv2.getOptimalNewCameraMatrix(
             self.camera_model.K,
             self.camera_model.D,
             (W, H),
             self.rectify_alpha
         )
         # store new camera parameters
         self._camera_parameters = (rect_K[0, 0], rect_K[1, 1], rect_K[0, 2], rect_K[1, 2])
     # create rectification map
     with self.profiler('/cb/camera_info/init_undistort_rectify_map'):
         self._mapx, self._mapy = cv2.initUndistortRectifyMap(
             self.camera_model.K,
             self.camera_model.D,
             None,
             rect_K,
             (W, H),
             cv2.CV_32FC1
         )
     # once we got the camera info, we can stop the subscriber
     self.loginfo('Camera info message received. Unsubscribing from camera_info topic.')
     # noinspection PyBroadException
     try:
         self._cinfo_sub.shutdown()
     except BaseException:
         pass
Example #16
    def __init__(self):

        # Pull constants from config file
        self.lower = rospy.get_param('~lower_color_threshold', [0, 0, 80])
        self.upper = rospy.get_param(
            '~higher_color_threshold', [200, 200, 250])
        self.min_contour_area = rospy.get_param('~min_contour_area', .001)
        self.max_contour_area = rospy.get_param('~max_contour_area', 400)
        self.min_trans = rospy.get_param('~min_trans', .05)
        self.max_velocity = rospy.get_param('~max_velocity', 1)
        self.timeout = rospy.Duration(
            rospy.get_param('~timeout_seconds'), 250000)
        self.min_observations = rospy.get_param('~min_observations', 8)
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to transform
        coordinates from the sub's perspective to our global map perspective.
        This information is also necessary for the least-squares intersection
        that finds the 3D coordinates of the torpedo board from 2D camera
        observations. More info can be found in sub8_vision_tools.
        '''

        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # Ros Services so mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_board3d)
        self.image_pub = Image_Publisher("torp_vision/debug")
        self.point_pub = rospy.Publisher(
            "torp_vision/points", Point, queue_size=1)
        self.mask_image_pub = rospy.Publisher(
            'torp_vison/mask', Image, queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)
Example #17
    def __init__(self):
        self._imageInputName = rospy.resolve_name('image')
        print(self._imageInputName)

        self._velodyneOutputName = rospy.resolve_name('velodyne_rgb_points')
        print(self._velodyneOutputName)

        self._cameraName = rospy.resolve_name('camera')
        print(self._cameraName)

        self._velodyneName = rospy.resolve_name('velodyne')
        print(self._velodyneName)

        self._imageInput = rospy.Subscriber(self._imageInputName,
                                            Image,
                                            callback=self.imageCallback,
                                            queue_size=1)
        self._camera = rospy.Subscriber(self._cameraName,
                                        CameraInfo,
                                        callback=self.cameraCallback,
                                        queue_size=1)
        self._velodyne = rospy.Subscriber(self._velodyneName,
                                          PointCloud2,
                                          callback=self.velodyneCallback,
                                          queue_size=1)

        self._velodyneOutput = rospy.Publisher(self._velodyneOutputName,
                                               PointCloud2,
                                               queue_size=1)

        self._bridge = cv_bridge.CvBridge()
        self._cameraModel = PinholeCameraModel()
        self._tf = tf.TransformListener()
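A hedged sketch of the projection step such a fusion node typically performs, assuming the cloud is already expressed in the camera's optical frame and that imageCallback stored the latest frame in an attribute (named _lastImage here for illustration):

    def velodyneCallback(self, cloud_msg):
        import sensor_msgs.point_cloud2 as pc2
        # hedged sketch: look up the image colour under each projected point
        if self._lastImage is None:
            return
        h, w = self._lastImage.shape[:2]
        colored = []
        for x, y, z in pc2.read_points(cloud_msg, field_names=('x', 'y', 'z'),
                                       skip_nans=True):
            if z <= 0:
                continue  # point is behind the camera
            u, v = self._cameraModel.project3dToPixel((x, y, z))
            if 0 <= int(u) < w and 0 <= int(v) < h:
                colored.append((x, y, z, self._lastImage[int(v), int(u)]))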
Example #18
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()

        self.config = self.setupParam("~config", "baseline")
        self.cali_file_name = self.setupParam("~cali_file_name", "default")
        rospack = rospkg.RosPack()
        self.cali_file = "/code/catkin_ws/src/dt-core/packages/vehicle_detection/config/vehicle_filter_node/" +  \
            self.cali_file_name + ".yaml"
        if not os.path.isfile(self.cali_file):
            rospy.logwarn("[%s] Can't find calibration file: %s.\n" %
                          (self.node_name, self.cali_file))
        self.loadConfig(self.cali_file)

        self.sub_corners = rospy.Subscriber("~corners",
                                            VehicleCorners,
                                            self.processCorners,
                                            queue_size=1)

        self.pub_pose = rospy.Publisher("~pose", VehiclePose, queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.processCameraInfo,
                                         queue_size=1)
        self.pub_time_elapsed = rospy.Publisher("~pose_calculation_time",
                                                Float32,
                                                queue_size=1)
        self.pcm = PinholeCameraModel()
        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
        self.lock = mutex()
Example #19
    def __init__(self):
        rospy.init_node('offboard_test', anonymous=True)
        self.loc = []
        rospy.Subscriber('/mavros/local_position/pose',
                         PoseStamped,
                         callback=self.mocap_cb)
        rospy.Subscriber('/mavros/state', State, callback=self.state_cb)
        rospy.Subscriber('/dreams/state',
                         String,
                         callback=self.update_state_cb)
        rospy.Subscriber('/darknet_ros/bounding_boxes',
                         BoundingBoxes,
                         callback=self.yolo)
        rospy.Subscriber('/darknet_ros/detection_image',
                         Image,
                         callback=self.detected_image)
        rospy.Subscriber('/camera/depth/image_raw',
                         Image,
                         callback=self.depth_image)
        #self.camera.fromCameraInfo(self.rgb_info())
        # Zhiang: use this instead
        camera_info = rospy.wait_for_message(
            "/camera/depth/camera_info",
            CameraInfo)  # Zhiang: make sure this is the correct camera
        #camera_info = rospy.wait_for_message("/camera/rgb/camera_info", CameraInfo)
        self.pinhole_camera_model = PinholeCameraModel()
        self.pinhole_camera_model.fromCameraInfo(camera_info)

        #self.listener =  tf.TransformListener()  # linked to tf_static
        self.planner()
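A short hedged sketch of how the depth camera model set up above is commonly combined with a depth reading to obtain a 3D point; the method name and the metre-scaled depth argument are assumptions:

    def pixel_depth_to_point(self, u, v, depth_m):
        # hedged sketch: scale the unit pixel ray so its z equals the depth
        # reading, giving a point in the depth camera's optical frame
        ray = self.pinhole_camera_model.projectPixelTo3dRay((u, v))
        scale = depth_m / ray[2]
        return (ray[0] * scale, ray[1] * scale, depth_m)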
Example #20
def process_msgs(msgs, cinfo, shift):
    ret = len(msgs) * [None]

    cam_model = PinholeCameraModel()
    if cinfo is not None:
        cam_model.fromCameraInfo(cinfo)

    offset = None
    for it in range(0, len(msgs)):
        cur_msg = msgs[it]
        out_msg = msg(cur_msg.header.stamp.to_sec(), list())
        if cinfo is not None:
            # rospy.loginfo("using cinfo")
            for det in cur_msg.points:
                xyz = np.array([det.x, det.y, det.z])
                if offset is None:
                    offset = xyz
                xyz = xyz - offset + shift
                # xyz = np.dot(xyz, R)
                out_msg.positions.append(xyz)
        else:
            # rospy.loginfo("not using cinfo")
            x = cur_msg.pose.pose.position.x
            y = cur_msg.pose.pose.position.y
            z = cur_msg.pose.pose.position.z
            out_msg.positions.append(np.array([x, y, z]))
        ret[it] = out_msg
    return ret
Example #21
    def __init__(self, cameraInfoFile):
        """ Overlay lidar points onto images by setting up the camera model, camera intrinsics, and lidar-camera extrinsics.
        ===============
        cameraInfoFile : an opened yaml file that stores camera intrinsics and other params
            used to initialize the cameraInfo which is used to help cameraModel reproject lidar points to image space. 
        """
        self.cameraParams = yaml.safe_load(cameraInfoFile)
        self.cameraInfo = CameraInfo()
        self._fillCameraInfo()
        self.cameraModel = PinholeCameraModel()
        self.cameraModel.fromCameraInfo(self.cameraInfo)

        print('Distortion model:', self.cameraInfo.distortion_model)

        self.bridge = cv_bridge.CvBridge()
        # Get transformation/extrinsics between lidar (velodyne frame) and camera (world frame)
        self.tf = tf.TransformListener()

        # Get the topics' names to subscribe or publish
        self.inImageName = rospy.resolve_name('image')
        self.outImageName = rospy.resolve_name('lidar_image')
        self.lidarName = rospy.remap_name('lidar')

        # Create subscribers and publishers
        self.subImage = rospy.Subscriber(self.inImageName,
                                         Image,
                                         callback=self.imageCallback,
                                         queue_size=1)
        self.lidar = rospy.Subscriber(self.lidarName,
                                      PointCloud2,
                                      callback=self.lidarCallback,
                                      queue_size=1)
        self.pubImage = rospy.Publisher(self.outImageName, Image, queue_size=1)

        self.lidarPoints = None
Example #22
def get_camera_model(bag, camera_name):
    camera_infos = [msg.message for msg in bag.read_messages(camera_name)]
    camera_info = camera_infos[0]
    from image_geometry import PinholeCameraModel
    camera_model = PinholeCameraModel()
    camera_model.fromCameraInfo(camera_info)
    return camera_model
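A hypothetical usage of the helper above against a recorded bag; the file name and topic are placeholders:

import rosbag

bag = rosbag.Bag('session.bag')
model = get_camera_model(bag, '/camera/rgb/camera_info')
u, v = model.project3dToPixel((0.5, 0.0, 2.0))  # a point 2 m in front of the camera
bag.close()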
Example #23
    def __init__(self, camera_info, homography, debug=False):
        self.pcm = PinholeCameraModel()
        self.pcm.fromCameraInfo(camera_info)
        self.H = homography  # maps points on ground plane to image plane
        self.debug = debug

        self.mapx, self.mapy = self._init_rectification()
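The _init_rectification helper is not shown; a plausible sketch, following the same cv2 pattern as the other examples on this page (cv2 import assumed) and keeping the full sensor resolution:

    def _init_rectification(self):
        # plausible sketch: build undistortion maps from the pinhole model
        W, H = self.pcm.width, self.pcm.height
        mapx, mapy = cv2.initUndistortRectifyMap(
            self.pcm.K, self.pcm.D, None, self.pcm.K, (W, H), cv2.CV_32FC1)
        return mapx, mapy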
Example #24
 def __init__(self, camera_info, maxdist, transform=None):
   """
   Parameters
   ----------
   camera_info: sensor_msgs/CameraInfo
     Meta information of the camera. For more details check the
     `sensor_msgs/CameraInfo
     <http://docs.ros.org/api/sensor_msgs/html/msg/CameraInfo.html>`_
     documentation.
   maxdist: float
    Maximum distance the FOV covers in the Z direction of the camera frame.
   transform: array_like
    Homogenous transformation of the camera reference frame. If `None` then
    the identity matrix is used.
   """
   if transform is None:
     transform = np.eye(4)
   self.maxdist = maxdist
   self.transform = np.array(transform)
   self.cam_model = PinholeCameraModel()
   self.cam_model.fromCameraInfo(camera_info)
   # Compute FOV corners
   delta_x = self.cam_model.getDeltaX(self.cam_model.width/2, self.maxdist)
   delta_y = self.cam_model.getDeltaY(self.cam_model.height/2, self.maxdist)
   self.corners = np.zeros((5,3))
   self.corners[0,:] = transform[:3,3]
   idx = 1
   for k in itertools.product([-1,1],[-1,1]):
     point = np.array([0, 0, self.maxdist, 1])
     point[:2] = np.array([delta_x, delta_y]) * np.array(k)
     self.corners[idx,:] = np.dot(transform, point)[:3]
     idx += 1
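A hypothetical usage of the constructor above; the class name is not shown in the snippet and is assumed here:

# hypothetical usage: FOV frustum reaching 2 m in front of the camera
camera_info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
fov = CameraFOV(camera_info, maxdist=2.0)  # class name assumed for illustration
print(fov.corners)  # row 0 is the camera origin, rows 1-4 the far-plane corners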
Example #25
    def __init__(self, publish_tf):
        #To take our Ros Image into a cv message and subsequently a numpy array.
        self.bridge = CvBridge()

        # To make the pixel to vector projection
        self.cam_model = PinholeCameraModel()

        #We need CameraInfo in order to use PinholeCameraModel below.
        rospy.Subscriber("camera_topic", CameraInfo, self.camera_callback)
        self.hasCameraInfo = False

        while not self.hasCameraInfo:
            print "waiting on camera info."
            rospy.sleep(0.5)

        #We are using a depth image to get depth information of what we're tracking.
        rospy.Subscriber("depth_image", Image, self.depth_callback)

        #This package is just an extension of cmvision to provide tf tracking of the blobs provided by cmvision.
        rospy.Subscriber('blobs', Blobs, self.blob_callback)

        #Subscribe to image for debugging.
        # rospy.Subscriber('thing', Image, self.image_callback)
        self.listener = tf.TransformListener()
        self.broadcaster = tf.TransformBroadcaster()

        #Republish each blob as part of a blob.
        self.blob_pub = rospy.Publisher('/blobs_3d', Blobs3d)

        self.publish_tf = publish_tf
Example #26
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()

        self.config = self.setupParam("~config", "baseline")
        self.cali_file_name = self.setupParam("~cali_file_name", "default")
        rospack = rospkg.RosPack()
        self.cali_file = rospack.get_path('duckietown') + \
          "/config/" + self.config + \
          "/vehicle_detection/vehicle_filter_node/" +  \
          self.cali_file_name + ".yaml"
        if not os.path.isfile(self.cali_file):
            rospy.logwarn("[%s] Can't find calibration file: %s.\n" %
                          (self.node_name, self.cali_file))
        self.loadConfig(self.cali_file)
        self.sub_corners = rospy.Subscriber("~corners",
                                            VehicleCorners,
                                            self.cbCorners,
                                            queue_size=1)
        self.pub_pose = rospy.Publisher("~pose", VehiclePose, queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.cbCameraInfo,
                                         queue_size=1)
        self.pcm = PinholeCameraModel()
        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
        self.lock = mutex()
Example #27
def random_point_inside_fov(camera_info,
                            maxdist,
                            mindist=0,
                            Tcamera=np.eye(4)):
    """
  Generates a random XYZ point inside the camera field of view
  @type  camera_info: sensor_msgs.CameraInfo
  @param camera_info: Message with the meta information for a camera
  @type  maxdist:     float
  @param maxdist:     distance from the camera ref frame in the z direction
  @type  mindist:     float
  @param mindist:     distance from the camera ref frame in the z direction
  @type  Tcamera:     np.array
  @param Tcamera:     homogeneous transformation for the camera ref frame
  @rtype: array
  @return: The random XYZ point
  """
    cam_model = PinholeCameraModel()
    cam_model.fromCameraInfo(camera_info)
    z = np.random.uniform(mindist, maxdist)
    delta_x = cam_model.getDeltaX(cam_model.width / 2, z)
    delta_y = cam_model.getDeltaY(cam_model.height / 2, z)
    point = np.array([0, 0, z, 1])
    point[:2] = np.array([delta_x, delta_y
                          ]) * (2 * np.random.random_sample(2) - 1.)
    return np.dot(Tcamera, point)[:3]
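A hypothetical usage of the sampler above, drawing a batch of points between 0.5 m and 3 m in front of the camera:

info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
points = np.array([random_point_inside_fov(info, maxdist=3.0, mindist=0.5)
                   for _ in range(100)])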
Example #28
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()

        self.distance_between_centers = self.setupParam(
            'distance_between_centers', 0.0125)
        self.max_reproj_pixelerror_pose_estimation = self.setupParam(
            'max_reproj_pixelerror_pose_estimation', 5)

        self.sub_corners = rospy.Subscriber("~corners",
                                            VehicleCorners,
                                            self.processCorners,
                                            queue_size=1)

        self.pub_pose = rospy.Publisher("~pose", VehiclePose, queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.processCameraInfo,
                                         queue_size=1)
        self.pub_time_elapsed = rospy.Publisher("~pose_calculation_time",
                                                Float32,
                                                queue_size=1)
        self.pcm = PinholeCameraModel()
        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
        self.lock = mutex()
Example #29
 def from_camera_info(CameraConfig, message):
     ''' Load camera intrinsics from ROS camera info topic, initialize
     camera extrinsics with None value. '''
     model = PinholeCameraModel()
     model.fromCameraInfo(message)
     return CameraConfig(model.K, model.D, None,
                         (model.width, model.height), model.P, model.R)
Example #30
    def __init__(self):
        """
            Constructor.
        """
        # Publishing rate
        self.rate = rospy.Rate(10)

        # Set camera info boolean flag
        self.cameraInfoInitialised = False

        # PinholeCameraModel object
        self.pcm = PinholeCameraModel()

        # Tf listener
        self.tf_listener = tf.TransformListener()

        # Constant path
        self.path = str(Path(os.path.dirname(os.path.abspath(__file__))).parents[0])

        # Poses, distance and marker publishers
        self.poses_pub = rospy.Publisher('poses', Poses, queue_size=5)
        self.distance_pub = rospy.Publisher('distances', Distances, queue_size=5)
        self.markers_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=5)

        # Camera info subscriber
        self.info_sub  = rospy.Subscriber("/xtion/rgb/camera_info", CameraInfo, self.setCameraInfo)