Example #1
File: hw2.py Project: daviswi/ROB456
        command.linear.x = 5.5
    else:
        command.linear.x = 0
        wayPoint = goalPos

    print(resetWp)
    print('Current Angle: {0}'.format(wayPoint))
    pub.publish(command)


# main function call
if __name__ == "__main__":
    # Initialize the node
    rospy.init_node('lab2', log_level=rospy.DEBUG)

    # subscribe to laser scan message
    sub = message_filters.Subscriber('base_scan', LaserScan)

    # subscribe to odometry message
    sub2 = message_filters.Subscriber('odom', Odometry)

    # synchronize laser scan and odometry data
    ts = message_filters.TimeSynchronizer([sub, sub2], 10)
    ts.registerCallback(callback)

    # publish twist message
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)

    # Turn control over to ROS
    rospy.spin()
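
The example above registers callback on a TimeSynchronizer built from a LaserScan and an Odometry subscriber, so message_filters will invoke the callback with one message per subscriber, in the order they were listed. A minimal sketch of the callback shape this wiring assumes (the body is illustrative, not the original hw2.py logic):

def callback(scan, odom):
    # scan is a sensor_msgs/LaserScan, odom is a nav_msgs/Odometry,
    # delivered together only when their header stamps match exactly
    closest = min(scan.ranges)
    rospy.logdebug('closest obstacle: %.2f m at x=%.2f',
                   closest, odom.pose.pose.position.x)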
Example #2
def init():

    global flags
    global underwater_image_pub
    global underwater_camera_info_pub

    flags = tf.app.flags
    flags.DEFINE_integer("epoch", 1, "Epoch to train [25]")
    flags.DEFINE_float("learning_rate", 0.0002,
                       "Learning rate of for adam [0.0002]")
    flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
    flags.DEFINE_float("train_size", np.inf,
                       "The size of train images [np.inf]")
    flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
    flags.DEFINE_integer(
        "input_height", 480,
        "The size of image to use (will be center cropped). [108]")
    flags.DEFINE_integer(
        "input_width", 640,
        "The size of image to use (will be center cropped). If None, same value as input_height [None]"
    )
    flags.DEFINE_integer(
        "input_water_height", 1024,
        "The size of image to use (will be center cropped). [108]")
    flags.DEFINE_integer(
        "input_water_width", 1360,
        "The size of image to use (will be center cropped). If None, same value as input_height [None]"
    )
    flags.DEFINE_integer("output_height", 48,
                         "The size of the output images to produce [64]")
    flags.DEFINE_integer(
        "output_width", 64,
        "The size of the output images to produce. If None, same value as output_height [None]"
    )
    flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
    flags.DEFINE_float("max_depth", 1.5, "Dimension of image color. [3.0]")
    flags.DEFINE_string("water_dataset", "water_images",
                        "The name of dataset [celebA, mnist, lsun]")
    flags.DEFINE_string("input_fname_pattern", "*.png",
                        "Glob pattern of filename of input images [*]")
    flags.DEFINE_string("checkpoint_dir", "model",
                        "Directory name to save the models [model]")
    flags.DEFINE_string("results_dir", "results",
                        "Directory name to save the checkpoints [results]")
    flags.DEFINE_string("sample_dir", "samples",
                        "Directory name to save the image samples [samples]")
    flags.DEFINE_boolean("is_crop", True,
                         "True for training, False for testing [False]")
    flags.DEFINE_boolean("visualize", False,
                         "True for visualizing, False for nothing [False]")
    flags.DEFINE_integer(
        "save_epoch", 10,
        "Interval (in epochs) at which to save checkpoints [10]")
    flags.DEFINE_boolean(
        "use_batchsize_for_prediction", True,
        "Use batch size for prediction (True) or one sample only (False) [True]"
    )
    flags = flags.FLAGS

    if flags.input_width is None:
        flags.input_width = flags.input_height
    if flags.output_width is None:
        flags.output_width = flags.output_height

    if not os.path.exists(flags.checkpoint_dir):
        os.makedirs(flags.checkpoint_dir)
    if not os.path.exists(flags.sample_dir):
        os.makedirs(flags.sample_dir)

    # initialize image subscribers and publisher
    rospy.init_node('underwater_camera_node', anonymous=True)
    image_sub = message_filters.Subscriber('/depth_camera/image_raw', Image)
    depth_sub = message_filters.Subscriber('/depth_camera/depth/image_raw',
                                           Image)
    camera_info_sub = message_filters.Subscriber('/depth_camera/camera_info',
                                                 CameraInfo)
    underwater_image_pub = rospy.Publisher(
        '/underwater_camera_watergan/image_raw', Image, queue_size=10)
    underwater_camera_info_pub = rospy.Publisher(
        '/underwater_camera_watergan/camera_info', CameraInfo, queue_size=10)

    ts = message_filters.TimeSynchronizer(
        [image_sub, depth_sub, camera_info_sub], 10)
    ts.registerCallback(camera_callback)
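
For reference, the camera_callback registered above receives the three synchronized messages in subscriber order. A hedged sketch of the expected signature (the pass-through body is an assumption; the real node would run the WaterGAN model here):

def camera_callback(image, depth, camera_info):
    # image and depth are sensor_msgs/Image, camera_info a sensor_msgs/CameraInfo,
    # time-aligned by the TimeSynchronizer
    underwater_image_pub.publish(image)
    underwater_camera_info_pub.publish(camera_info)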
Example #3
        rospy.loginfo("Great, You have {} CUDA device!".format(torch.cuda.device_count()))
        print ('Available devices ', torch.cuda.device_count())
        print ('Current cuda device ', torch.cuda.current_device())
    else:
        rospy.logerr("Sorry, You DO NOT have a CUDA device!")

    #Declare DDRNet model and load weights
    net = make_model('ddrnet', num_classes=12).cuda()
    load_checkpoint = torch.load(weight_file)

    state_dict = dict()
    for key in load_checkpoint['state_dict'].keys():
        new_key = key.replace('module.','')
        state_dict[new_key] = load_checkpoint['state_dict'][key]
    net.load_state_dict(state_dict)
    net.eval()

    # Declare a TF transform broadcaster
    tf_broadcaster = tf.TransformBroadcaster()

    depth_sub = message_filters.Subscriber(depth_image_topic, Image)
    rgb_sub = message_filters.Subscriber(rgb_image_topic, Image)
    pointcloud_sub = message_filters.Subscriber(pointcloud_topic, PointCloud2)
    camera_pose_sub = message_filters.Subscriber(camera_pose_topic, PoseStamped)

    ts = message_filters.TimeSynchronizer([depth_sub, rgb_sub, pointcloud_sub, camera_pose_sub], 1)
    ts.registerCallback(callback)

    print('SSC READY!')
    rospy.spin()
Example #4
def callback(image, camera_info):
    # Solve all of perception here...
    pass

image_sub = message_filters.Subscriber('image', Image)
info_sub = message_filters.Subscriber('camera_info', CameraInfo)
ts = message_filters.TimeSynchronizer([image_sub, info_sub], 10)
ts.registerCallback(callback)
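
TimeSynchronizer only fires when every message carries an identical header stamp. When the sources are not hardware-synchronized (the common case), the same wiring works with ApproximateTimeSynchronizer plus a slop tolerance in seconds, as a sketch:

ts = message_filters.ApproximateTimeSynchronizer([image_sub, info_sub],
                                                 queue_size=10, slop=0.1)
ts.registerCallback(callback)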
Example #5
    current_pos = [int(currentX), int(currentY)]

    result = aStarAlog(map2d, current_pos, [goal_p.x, goal_p.y])
    path = []
    for item in result:  # store the path as a list
        odom_scale_x = int(item.row * resolution + origin_X)
        odom_scale_y = int(item.col * resolution + origin_Y)
        path.append([odom_scale_x, odom_scale_y])
    ask_waypoint_callback(None)


if __name__ == "__main__":

    #global pub
    rospy.init_node("aStarPath")

    map_sub = message_filters.Subscriber('/map', OccupancyGrid)
    odom_sub = message_filters.Subscriber('/odom', Odometry)
    waypoint_bool_sub = rospy.Subscriber('/ask_waypoint', Bool,
                                         ask_waypoint_callback)

    waypoint_sub = rospy.Subscriber('/frontier_goal', Point, frontier_goal)
    ts = message_filters.TimeSynchronizer([map_sub, odom_sub], 10)

    pub = rospy.Publisher('/waypoints', Point, queue_size=10)
    pub2 = rospy.Publisher('/frontier_goal_request', Bool, queue_size=10)
    pub2.publish(True)
    ts.registerCallback(map_odom_callback)

    rospy.spin()
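
One caveat in this example: pub2.publish(True) runs immediately after the publisher is created, before any subscriber has connected, so that first /frontier_goal_request message can be silently dropped. A common workaround (a sketch, not part of the original file) is to latch the topic so late subscribers still receive the last message:

pub2 = rospy.Publisher('/frontier_goal_request', Bool, queue_size=10, latch=True)
pub2.publish(True)  # latched: delivered even to subscribers that connect later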
Example #6
    #out = model.encode(x)
    #   calc = f_model.calc()
    get_f = theano.function([x], out)

    #  get_nn = theano.function([x], calc)

    ann = libfann.neural_net()
    ann.create_from_file(networkPath)
    shape = (28, 28, 1)
    flat_shape = (1, 28 * 28)
    bridge = CvBridge()

    path = "/home/rik/nav-data/"
    rospy.init_node('live_error_test', anonymous=True)
    rospy.loginfo("Node initialized")

    odom_sub = message_filters.Subscriber('odom', Odometry)
    image_sub = message_filters.Subscriber('/sudo/bottom_webcam/image_raw',
                                           Image)

    ts = ApproximateTimeSynchronizer([odom_sub, image_sub], 100, 0.2)

    ts.registerCallback(live_test)

    rospy.loginfo("Callbacks registered")

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv2.destroyAllWindows()
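
Here ApproximateTimeSynchronizer([odom_sub, image_sub], 100, 0.2) passes a queue size of 100 and a 0.2 s slop positionally. The equivalent, more explicit keyword form (a sketch):

ts = ApproximateTimeSynchronizer([odom_sub, image_sub],
                                 queue_size=100, slop=0.2)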
Example #7
    def __init__(self,
                 boards,
                 service_check=True,
                 synchronizer=message_filters.TimeSynchronizer,
                 flags=0,
                 pattern=Patterns.Chessboard,
                 camera_name='',
                 checkerboard_flags=0,
                 max_chessboard_speed=-1,
                 queue_size=1):
        if service_check:
            # assume any non-default service names have been set.  Wait for the service to become ready
            for svcname in ["camera", "left_camera", "right_camera"]:
                remapped = rospy.remap_name(svcname)
                if remapped != svcname:
                    fullservicename = "%s/set_camera_info" % remapped
                    print("Waiting for service", fullservicename, "...")
                    try:
                        rospy.wait_for_service(fullservicename, 5)
                        print("OK")
                    except rospy.ROSException:
                        print("Service not found")
                        rospy.signal_shutdown('Quit')

        self._boards = boards
        self._calib_flags = flags
        self._checkerboard_flags = checkerboard_flags
        self._pattern = pattern
        self._camera_name = camera_name
        self._max_chessboard_speed = max_chessboard_speed
        lsub = message_filters.Subscriber('left', sensor_msgs.msg.Image)
        rsub = message_filters.Subscriber('right', sensor_msgs.msg.Image)
        ts = synchronizer([lsub, rsub], 4)
        ts.registerCallback(self.queue_stereo)

        msub = message_filters.Subscriber('image', sensor_msgs.msg.Image)
        msub.registerCallback(self.queue_monocular)

        self.set_camera_info_service = rospy.ServiceProxy(
            "%s/set_camera_info" % rospy.remap_name("camera"),
            sensor_msgs.srv.SetCameraInfo)
        self.set_left_camera_info_service = rospy.ServiceProxy(
            "%s/set_camera_info" % rospy.remap_name("left_camera"),
            sensor_msgs.srv.SetCameraInfo)
        self.set_right_camera_info_service = rospy.ServiceProxy(
            "%s/set_camera_info" % rospy.remap_name("right_camera"),
            sensor_msgs.srv.SetCameraInfo)

        self.q_mono = BufferQueue(queue_size)
        self.q_stereo = BufferQueue(queue_size)

        self.c = None

        self._last_display = None

        mth = ConsumerThread(self.q_mono, self.handle_monocular)
        mth.setDaemon(True)
        mth.start()

        sth = ConsumerThread(self.q_stereo, self.handle_stereo)
        sth.setDaemon(True)
        sth.start()
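
Because the synchronizer class is injected through the synchronizer parameter, callers can switch this constructor to approximate-time matching without modifying it. A sketch, with Node standing in for whatever class this __init__ belongs to and boards assumed to be prepared elsewhere:

import functools
from message_filters import ApproximateTimeSynchronizer

# exact-time matching (the default above)
node = Node(boards)

# approximate-time matching with a 0.1 s tolerance
node = Node(boards,
            synchronizer=functools.partial(ApproximateTimeSynchronizer, slop=0.1))

This mirrors the functools.partial pattern used in Example #10 below.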
Example #8
    def __init__(self):
        # initialize the node named image_processing
        rospy.init_node('image_processing', anonymous=True)
        # initialize a publisher to send images from camera1 to a topic named image_topic1
        self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=10)
        # initialize a subscriber to receive messages from the topic /camera1/robot/image_raw and use a callback function to receive the data
        self.image_sub1 = message_filters.Subscriber(
            "/camera1/robot/image_raw", Image)
        #receive image from second camera
        self.image_sub2 = message_filters.Subscriber(
            "/camera2/robot/image_raw", Image)
        # Synchronize subscriptions into one callback
        timesync = message_filters.TimeSynchronizer(
            [self.image_sub1, self.image_sub2], 10)
        timesync.registerCallback(self.callback1)
        # initialize the bridge between openCV and ROS
        self.bridge = CvBridge()
        # initialize a publisher to send joints' angular position to a topic called joints_pos
        self.joints_pub = rospy.Publisher("joints_pos",
                                          Float64MultiArray,
                                          queue_size=10)
        #initialize a publisher to send the position of the orange sphere to a topic named target_pos
        self.target_pub = rospy.Publisher("target_pos",
                                          Float64MultiArray,
                                          queue_size=10)

        #Publisher of all joints
        self.joint1_pub = rospy.Publisher(
            "/robot/joint1_position_controller/command",
            Float64,
            queue_size=10)
        self.joint2_pub = rospy.Publisher(
            "/robot/joint2_position_controller/command",
            Float64,
            queue_size=10)
        self.joint3_pub = rospy.Publisher(
            "/robot/joint3_position_controller/command",
            Float64,
            queue_size=10)
        self.joint4_pub = rospy.Publisher(
            "/robot/joint4_position_controller/command",
            Float64,
            queue_size=10)

        #save current angles so we can use least squares
        self.current_angles = [0, 0, 0, 0]
        #hardcoded so the pixel-to-meter ratio is not recalculated every time
        self.pixel2meter_image1 = 0.03955706129619173
        self.pixel2meter_image2 = 0.0391203563104617

        # record the beginning time
        self.time_initial = rospy.get_time()

        self.theta3 = np.array([0])
        self.previous_theta1 = 0
        self.previous_theta2 = 0
        #hardcode yellow and blue coordinates since they do not move; this improves accuracy
        self.detect_yellow_image1 = np.array([399, 532])
        self.detect_yellow_image2 = np.array([399, 532])
        self.detect_blue_image1 = np.array([399, 472])
        self.detect_blue_image2 = np.array([399, 472])

        # joint angles in last iteration
        self.q_prev_observed = np.array([0.0, 0.0, 0.0, 0.0])
        self.prev_pos = np.array([0.0, 0.0, 9.0])
        # initialize errors
        self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
        # initialize error and derivative of error for trajectory tracking
        self.error = np.array([0.0, 0.0, 0.0], dtype='float64')
        self.error_d = np.array([0.0, 0.0, 0.0], dtype='float64')
        self.last_green_image1 = [0, 0]
        self.last_green_image2 = [0, 0]
        self.last_red_image1 = [0, 0]
        self.last_red_image2 = [0, 0]
Example #9
    def __init__(self, config):
        rospy.init_node("gqcnn_base_grasp", anonymous=True)

        # Moveit! setup
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.arm = moveit_commander.MoveGroupCommander('arm')
        self.gripper = moveit_commander.MoveGroupCommander('gripper')
        self.arm.set_start_state_to_current_state()
        self.arm.set_pose_reference_frame('base')
        self.arm.set_planner_id('SBLkConfigDefault')
        self.arm.set_planning_time(10)
        self.arm.set_max_velocity_scaling_factor(0.04)
        self.arm.set_max_acceleration_scaling_factor(0.04)
        self.arm.set_goal_orientation_tolerance(0.1)
        self.arm.set_workspace([-2, -2, -2, 2, 2, 2])
        self.gripper.set_goal_joint_tolerance(0.2)
        rospy.loginfo(self.arm.get_pose_reference_frame())  #base
        rospy.loginfo(self.arm.get_planning_frame())  #world

        # message filters for the image topics; the topic names need to be set
        # carefully: /camera/depth_registered/sw_registered/image_rect,
        # /camera/rgb/image_rect_color
        rospy.loginfo('wait_for_message')
        rospy.wait_for_message("/camera/rgb/image_raw", Image)
        rospy.wait_for_message("/camera/depth/image", Image)
        rospy.loginfo('start_callback')
        self.image_sub = message_filters.Subscriber("/camera/rgb/image_raw",
                                                    Image)
        self.depth_sub = message_filters.Subscriber("/camera/depth/image",
                                                    Image)
        self.camera_info_topic = "/camera/rgb/camera_info"
        self.camera_info = rospy.wait_for_message(self.camera_info_topic,
                                                  CameraInfo)
        rospy.loginfo(self.camera_info.header.frame_id)

        self.ts = message_filters.ApproximateTimeSynchronizer(
            [self.image_sub, self.depth_sub], 1, 1)
        self.ts.registerCallback(self.cb)
        self.color_msg = Image()
        self.depth_msg = Image()
        self.mask = Image()

        # bounding box for objects
        self.bounding_box = BoundingBox()
        self.bounding_box.maxX = 420
        self.bounding_box.maxY = 250
        self.bounding_box.minX = 90
        self.bounding_box.minY = 40
        self.bridge = CvBridge()

        # transform listener
        self.listener = tf.TransformListener()

        # compute_ik service
        self.ik_srv = rospy.ServiceProxy('/compute_ik', GetPositionIK)
        rospy.loginfo("Waiting for /compute_ik service...")
        self.ik_srv.wait_for_service()
        rospy.loginfo("Connected!")
        self.service_request = PositionIKRequest()
        self.service_request.group_name = 'arm'
        self.service_request.timeout = rospy.Duration(2)
        self.service_request.avoid_collisions = True

        # signal
        self.start = 0
Example #10
    def __init__(self, chess_size, dim, approximate=0):
        self.board = ChessboardInfo()
        self.board.n_cols = chess_size[0]
        self.board.n_rows = chess_size[1]
        self.board.dim = dim

        # make sure n_cols is not smaller than n_rows, otherwise error computation will be off
        if self.board.n_cols < self.board.n_rows:
            self.board.n_cols, self.board.n_rows = self.board.n_rows, self.board.n_cols

        image_topic = rospy.resolve_name("monocular") + "/image_rect"
        camera_topic = rospy.resolve_name("monocular") + "/camera_info"

        tosync_mono = [
            (image_topic, sensor_msgs.msg.Image),
            (camera_topic, sensor_msgs.msg.CameraInfo),
        ]

        if approximate <= 0:
            sync = message_filters.TimeSynchronizer
        else:
            sync = functools.partial(ApproximateTimeSynchronizer,
                                     slop=approximate)

        tsm = sync([
            message_filters.Subscriber(topic, type)
            for (topic, type) in tosync_mono
        ], 10)
        tsm.registerCallback(self.queue_monocular)

        left_topic = rospy.resolve_name("stereo") + "/left/image_rect"
        left_camera_topic = rospy.resolve_name("stereo") + "/left/camera_info"
        right_topic = rospy.resolve_name("stereo") + "/right/image_rect"
        right_camera_topic = rospy.resolve_name(
            "stereo") + "/right/camera_info"

        tosync_stereo = [(left_topic, sensor_msgs.msg.Image),
                         (left_camera_topic, sensor_msgs.msg.CameraInfo),
                         (right_topic, sensor_msgs.msg.Image),
                         (right_camera_topic, sensor_msgs.msg.CameraInfo)]

        tss = sync([
            message_filters.Subscriber(topic, type)
            for (topic, type) in tosync_stereo
        ], 10)
        tss.registerCallback(self.queue_stereo)

        self.br = cv_bridge.CvBridge()

        self.q_mono = Queue()
        self.q_stereo = Queue()

        mth = ConsumerThread(self.q_mono, self.handle_monocular)
        mth.setDaemon(True)
        mth.start()

        sth = ConsumerThread(self.q_stereo, self.handle_stereo)
        sth.setDaemon(True)
        sth.start()

        self.mc = MonoCalibrator([self.board])
        self.sc = StereoCalibrator([self.board])
Example #11
    # Close matcher after 5 seconds
    time.sleep(5)
    proc.send_signal(signal.SIGINT)
    rospy.loginfo("GPU matcher closed...")
    # Restart the matcher after 5 seconds of being closed
    #time.sleep(5)
    proc = subprocess.Popen(['rosrun', 'ug_stereomatcher', 'UG_matcher_gpu'])
    rospy.loginfo("GPU matcher opened...")


# ********* MAIN *********
if __name__ == '__main__':
    global proc

    rospy.init_node('RHmatcher_hack', anonymous=True)
    ### subscribers
    subDV = message_filters.Subscriber('output_disparityV', DisparityImage)
    subDH = message_filters.Subscriber('output_disparityH', DisparityImage)
    ts = message_filters.TimeSynchronizer([subDH, subDV], 1)
    ts.registerCallback(messagesCB)

    subDVf = message_filters.Subscriber('output_stackV', foveatedstack)
    subDHf = message_filters.Subscriber('output_stackH', foveatedstack)
    tsf = message_filters.TimeSynchronizer([subDHf, subDVf], 1)
    tsf.registerCallback(messagesCBF)

    proc = subprocess.Popen(['rosrun', 'ug_stereomatcher', 'UG_matcher_gpu'])

    rospy.loginfo("matcher.py node ready!")
    rospy.spin()
Example #12
                        picto.action = Pictogram.ADD
                        picto.color = ColorRGBA(1.0, 0.0, 0.0, 0.8)
                        rospy.loginfo("%s early taking off %s [s]", ref_st.header.frame_id, str(ref_st.state.remaining_time))
                    else:
                        continue
            picto.header.frame_id = ref_st.header.frame_id
            picto.header.stamp = ref_st.header.stamp
            arr.pictograms.append(picto)
        if len(arr.pictograms) > 0:
            pub.publish(arr)
        ref_contact_states_queue.pop(0)
        act_contact_states_queue.pop(0)
    ref_contact_states_queue.append(ref)
    act_contact_states_queue.append(act)

if __name__ == "__main__":
    rospy.init_node("landing_time_detector")

    pub = rospy.Publisher("~pictogram_array", PictogramArray)
    ref_contact_states_sub = message_filters.Subscriber('~input_ref', ContactStatesStamped)
    act_contact_states_sub = message_filters.Subscriber('~input_act', ContactStatesStamped)

    buffer_size = 5
    ref_contact_states_queue = []
    act_contact_states_queue = []

    ts = message_filters.TimeSynchronizer([ref_contact_states_sub, act_contact_states_sub], 10)
    ts.registerCallback(callback)

    rospy.spin()
Example #13
        pose = KpsToGrasppose(ret, img, depth_raw, M_CL, M_BL, cameraMatrix)
        msg2 = Float64MultiArray()
        msg2.data = pose
        pub_res.publish(msg2)

    except CvBridgeError as e:
        print(e)


if __name__ == '__main__':
    # initialize ros node
    rospy.init_node("Bin_picking")

    # Bridge to convert ROS Image type to OpenCV Image type
    cv_bridge = CvBridge()
    cv2.WITH_QT = False
    # Get camera calibration parameters
    cam_param = rospy.wait_for_message('/camera/rgb/camera_info',
                                       CameraInfo,
                                       timeout=None)

    # Subscribe to rgb and depth channel
    image_sub = message_filters.Subscriber("/camera/rgb/image_rect_color",
                                           Image)
    depth_sub = message_filters.Subscriber("/camera/depth_registered/image",
                                           Image)
    ts = message_filters.ApproximateTimeSynchronizer([image_sub, depth_sub], 1,
                                                     0.1)
    ts.registerCallback(kinect_rgbd_callback)

    rospy.spin()
Example #14
    def __init__(self,
                 camera_name,
                 rgb_topic,
                 depth_topic,
                 camera_info_topic,
                 choice=None):
        """
        @brief      A class to obtain time synchronized RGB and Depth frames from a camera topic and find the
                    3D position of the point wrt the required frame of reference.

        @param      camera_name        Just a relevant name for the camera being used.
        @param      rgb_topic          The topic that provides the rgb image information. 
        @param      depth_topic        The topic that provides the depth image information. 
        @param      camera_info_topic  The topic that provides the camera information. 
        @param      choice             If the camera used is a real or simulated camera based on commandline arg.
        """
        self.camera_name = camera_name
        self.rgb_topic = rgb_topic
        self.depth_topic = depth_topic
        self.camera_info_topic = camera_info_topic
        self.choice = choice

        self.poses = []
        self.rays = []
        self.OBlobs_x = []
        self.OBlobs_y = []
        self.OBlobs_z = []
        self.colors = []
        self.is_updated = False
        self.found_objects = False
        self.latest_rgb = None
        self.latest_depth_32FC1 = None

        tfBuffer = tf2_ros.Buffer()
        self.br = tf2_ros.TransformBroadcaster()
        self.lis = tf2_ros.TransformListener(tfBuffer)

        self.bridge = CvBridge()

        self.camera_model = image_geometry.PinholeCameraModel()

        # self.marker_pub = rospy.Publisher('visualization_marker', Marker, queue_size=10)
        # cv2.namedWindow("Image window", cv2.WINDOW_NORMAL)
        # cv2.setMouseCallback("Image window", self.mouse_callback)
        # self.br = tf.TransformBroadcaster()
        # self.lis = tf.TransformListener()
        # # Have we processed the camera_info and image yet?
        # self.ready_ = False
        q = 1

        self.pose3D_pub = rospy.Publisher('object_location',
                                          OBlobs,
                                          queue_size=q)

        self.sub_rgb = message_filters.Subscriber(rgb_topic,
                                                  Image,
                                                  queue_size=q)

        self.sub_depth = message_filters.Subscriber(depth_topic,
                                                    Image,
                                                    queue_size=q)

        self.sub_camera_info = message_filters.Subscriber(camera_info_topic,
                                                          CameraInfo,
                                                          queue_size=q)
        # self.tss = message_filters.ApproximateTimeSynchronizer([self.sub_rgb, self.sub_depth, self.sub_camera_info], queue_size=15, slop=0.4)
        self.tss = message_filters.ApproximateTimeSynchronizer(
            [self.sub_rgb, self.sub_depth, self.sub_camera_info],
            queue_size=q,
            slop=0.5)
        #self.tss = message_filters.TimeSynchronizer([sub_rgb], 10)
        self.tss.registerCallback(self.callback)

        rospy.loginfo('Camera {} initialised, {}, {}, {}'.format(
            self.camera_name, rgb_topic, depth_topic, camera_info_topic))
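
A hedged instantiation sketch for the class above (the class name Camera and the topic names are hypothetical; only the constructor parameters come from the docstring):

rospy.init_node('camera_node')
cam = Camera('head_camera',                 # hypothetical camera name
             '/camera/rgb/image_raw',       # hypothetical topics
             '/camera/depth/image_raw',
             '/camera/rgb/camera_info',
             choice='real')
rospy.spin()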
Example #15
    def __init__(self):
        self.image_topic = rospy.get_param('~image_topic')
        self.fusion_topic = "/detection/fusion_tools/objects"  # rospy.get_param('~fusion_topic')
        self.lanefusion_t = "/lane_fusion"
        self.output_image = rospy.get_param('~output_image')
        self.output_lane = rospy.get_param('~output_lane')
        self.weight_path = rospy.get_param('~weight_path')
        self.use_gpu = rospy.get_param('~use_gpu')

        self.init_lanenet()
        self.bridge = CvBridge()

        ## Unsynced alternative (two separate msg handlers)
        ## sub_image = rospy.Subscriber(self.image_topic, Image, self.img_callback, queue_size=1)
        ## sub_fused = rospy.Subscriber(self.fusion_topic, DetectedObjectArray, self.objs_callback, queue_size=1)
        image_sub = message_filters.Subscriber(self.image_topic, Image)
        fused_sub = message_filters.Subscriber(self.fusion_topic,
                                               DetectedObjectArray)
        tsync_sub = message_filters.ApproximateTimeSynchronizer(
            [image_sub, fused_sub], 32, 0.1, allow_headerless=True)
        tsync_sub.registerCallback(self.sync_cb)

        self.oboi = ['person', 'bicycle', 'car', 'motorbike', 'bus',
                     'truck']  # 'traffic light', 'stop sign' later
        # how to sync ingestion speed with the prior image; use a simple solution for now:
        # - At entry of img_callback, set lock / flag so subsequent DetectedObjectArray won't enqueue
        # - At exit of img_callback, release the flag
        # - At entry of objs_callback, simply return when sync queue is locked
        # filtered objs
        self.objs = []
        self.sync = False

        self.pub_image = rospy.Publisher(self.output_image,
                                         Image,
                                         queue_size=4)
        self.pub_lfuse = rospy.Publisher(self.lanefusion_t,
                                         LaneObstacle,
                                         queue_size=4)
        self.lane_obst = LaneObstacle()
        self.lane_obst.radius = 5000.0  #  (m)
        self.lane_obst.centerdev = 0.0  # (cm)
        self.lane_obst.anglerror = 0.0  #  (o)
        self.lane_obst.dist2obst_e = -1.0  #  (m)
        self.lane_obst.dist2obst_l = -1.0  #  (m)
        self.lane_obst.dist2obst_r = -1.0  #  (m)

        self.h = 600  #  pix
        self.w = 800  #  pix

        self.ym_per_pix = 20. / 600  #  m/pix
        self.xm_per_pix = 3.6 / 540  #  m/pix
        self.M = np.array([[-9.75609756e-01, -1.31707317e+00, 7.90243902e+02],
                           [-2.84217094e-16, -2.92682927e+00, 1.17073171e+03],
                           [-5.26327952e-19, -3.29268293e-03, 1.00000000e+00]])
        self.Minv = np.array(
            [[3.25000000e-01, -4.50000000e-01, 2.70000000e+02],
             [0.00000000e+00, -3.41666667e-01, 4.00000000e+02],
             [0.00000000e+00, -1.12500000e-03, 1.00000000e+00]])

        self.lanes_image = None
        self.llane_image = None
        self.rlane_image = None

        # L/R lane line instances
        self.lline = Laneline()
        self.rline = Laneline()
        self.q_RDH = dq([], maxlen=10)

        # Set initial ^2 fitting parameters, for ego lane's center
        self.q_fit = dq([], maxlen=5)  # vertical at pixel 400
        self.q_fit.append(np.array([0.0, 0.0, 400.]))

        self.count = 0
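
The comments in __init__ above outline a lock/flag scheme to keep DetectedObjectArray messages from queuing up while an image is being processed. A minimal standalone sketch of that idea (the callback names follow the comments; the threading.Lock is an assumption, since the class shown only stores a self.sync flag):

import threading

class SyncGate(object):
    # Drop incoming detections while an image is being processed.
    def __init__(self):
        self._busy = threading.Lock()

    def img_callback(self, img_msg):
        with self._busy:         # set the flag on entry, release it on exit
            pass                 # ... process the image ...

    def objs_callback(self, objs_msg):
        if self._busy.locked():  # sync queue is locked: skip this message
            return
        # ... store the filtered objects ...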
Example #16
    def __init__(self):
        # Sub topics
        self.topic_rgb_image = '/zed/rgb/image_rect_color'
        self.topic_depth_image = '/zed/depth/depth_registered'  # Image: 32-bit depth values in meters
        # Pub topics
        self.topic_yolo_output_img = 'yolo/labelled_img'
        self.topic_tracker_output_img = 'labelled_img'

        self.new_img_received = False
        self.rgb_img = None
        self.rgb_img_output = None
        self.depth_img = None
        self.bridge = CvBridge()

        # Vars Tracker
        self.selected_target_bbox = None # Stores the bbox of the selected target, shortlisted from yolo_bbox_arr
        self.yolo_bbox_arr = [] # Stores array of bboxes returned by object detection code
        self.target_class = 'person'  # The class to be tracked.
        self.tracker_min_iou = 0.3  # Min value of IoU for a bbox to be considered same as tracked object.
        self.tracker_init_min_iou = 0.2  # This is used for first initialization of tracker only
        self.threshold_confidence = 0.4  # prob of class must be greater than this threshold for us to use it.
        self.tracker = None  # Object to store tracker: cv2.TrackerKCF_create() or cv2.TrackerMOSSE_create()
        self.fps = FPS().start()

        # Load yolo config
        rospack = rospkg.RosPack()
        path = rospack.get_path('sdroller_track_detect')
        object_detection_config = rospy.get_param("/object_detection_config")
        self.config = yaml.safe_load(open(object_detection_config))
        self.yolo = YOLO(path + self.config['classification']['model'], path +
                         self.config['classification']['anchors'], path + self.config['classification']['classes'])

        # Subscribers
        '''We increase the default buff_size because it is smaller than the camera's images; if the node doesn't
        process them fast enough, a number of images back up in a queue and appear as lag in the video stream.
        We also use the TimeSynchronizer to subscribe to multiple topics.
        '''
        buff_size_bytes = 8 * 1024 * 1024  # 8MB
        self.rgb_img_sub = message_filters.Subscriber(
            self.topic_rgb_image, Image, queue_size=1, buff_size=buff_size_bytes)
        self.depth_img_sub = message_filters.Subscriber(
            self.topic_depth_image, Image, queue_size=1, buff_size=buff_size_bytes)
        self.ts = message_filters.ApproximateTimeSynchronizer(
            [self.rgb_img_sub, self.depth_img_sub], 1, 0.1, allow_headerless=False)
        self.ts.registerCallback(self.callback_rgb_depth_imgs)

        # Publish the image with detection boxes drawn
        self.yolo_labelled_img_pub = rospy.Publisher(self.topic_yolo_output_img, Image, queue_size=1)
        self.tracker_labelled_img_pub = rospy.Publisher(self.topic_tracker_output_img, Image, queue_size=1)

        # Get image details
        rgb_img_sample = rospy.wait_for_message(self.topic_rgb_image, Image)
        self.img_height = rgb_img_sample.height
        self.img_width = rgb_img_sample.width

        # Set tracker_bbox to the middle of the image, where a person standing in front of the robot is normally detected
        self.tracker_bbox = bbox()
        self.tracker_bbox.xmin = int(self.img_width * (1.0 / 3))
        self.tracker_bbox.ymin = int(self.img_height * (1.0 / 4))
        self.tracker_bbox.xmax = int(self.img_width * (2.0 / 3))
        self.tracker_bbox.ymax = int(self.img_height)
Example #17
    def __init__(self):
        self.SIZE = 1280 * 960 * 3  # Just to set the buffer_size

        self.THRESH = CLASS_THRESHOLDS

        self.MAP_SIDE = 1000
        self.SCALE = 20

        publish_rois_debug = rospy.get_param('~publish_rois_debug', False)
        self.draw_arrows = rospy.get_param('~draw_arrows', True)
        self.y_offset = rospy.get_param('~y_offset', 0)
        self.roi_height = rospy.get_param('~height', 768)

        self.pub_demo = rospy.Publisher('nice_demo', Image, queue_size=10)
        self.pub_complex_demo = rospy.Publisher('very_nice_demo',
                                                Image,
                                                queue_size=10)
        self.pub_map = rospy.Publisher('map', Image, queue_size=10)
        if publish_rois_debug:
            self.pub_rois = rospy.Publisher('rois_debug', Image, queue_size=10)
        self.bridge = CvBridge()

        map_sync = message_filters.TimeSynchronizer([
            message_filters.Subscriber(
                "topdown_view", Image, buff_size=self.SIZE * 2),
            message_filters.Subscriber(
                "obstacles", numpy_msg(ObstacleList), buff_size=self.SIZE * 2)
        ], 10)

        bboxes_sync = message_filters.TimeSynchronizer([
            message_filters.Subscriber("/stereo_camera/left/image_rect_color",
                                       Image,
                                       buff_size=self.SIZE * 2),
            message_filters.Subscriber("filtered_detection",
                                       numpy_msg(ObstacleList),
                                       buff_size=self.SIZE * 2)
        ], 10)

        freemap_sync = message_filters.TimeSynchronizer([
            message_filters.Subscriber("/stereo_camera/left/image_rect_color",
                                       Image,
                                       buff_size=self.SIZE * 2),
            message_filters.Subscriber("filtered_detection",
                                       numpy_msg(ObjectList),
                                       buff_size=self.SIZE * 2),
            message_filters.Subscriber(
                "free_mask", Image, buff_size=self.SIZE * 2)
        ], 10)

        map_sync.registerCallback(self.map_callback)
        bboxes_sync.registerCallback(self.bboxes_callback)
        freemap_sync.registerCallback(self.freemap_callback)

        if publish_rois_debug:
            externalrois_sync = message_filters.TimeSynchronizer([
                message_filters.Subscriber(
                    "/stereo_camera/left/image_rect_color",
                    Image,
                    buff_size=self.SIZE * 2),
                message_filters.Subscriber("filtered_detection",
                                           numpy_msg(ObjectList),
                                           buff_size=self.SIZE * 2),
                message_filters.Subscriber("/stereo_camera/obstacles",
                                           numpy_msg(ObstacleList),
                                           buff_size=self.SIZE * 2),
                message_filters.Subscriber("/stereo_camera/freeobs_mask",
                                           Image,
                                           buff_size=self.SIZE * 2)
            ], 10)
            externalrois_sync.registerCallback(self.externalrois_callback)
Example #18
    def __init__(self,opt,save_img=False):
        source, weights, self.view_img, self.save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
        self.webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
            ('rtsp://', 'rtmp://', 'http://'))
        
        self.save_img = save_img

        # Directories
        # self.save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
        # (self.save_dir / 'labels' if self.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Initialize
        set_logging()
        self.device = select_device(opt.device)

        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA

        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=self.model.stride.max())  # check img_size
        if self.half:
            self.model.half()  # to FP16

        #"Fusing layers..."

        # Second-stage classifier
        self.classify = False
        if self.classify:
            modelc = load_classifier(name='resnet101', n=2)  # initialize
            modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=self.device)['model'])
            modelc.to(self.device).eval()

        # Get names and colors
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]

        # Run inference
        t0 = time.time()
        img = torch.zeros((1, 3, imgsz, imgsz), device=self.device)  # init img
        _ = self.model(img.half() if self.half else img) if self.device.type != 'cpu' else None  # run once   

        # ROS
        self.bridge = CvBridge()
        self.pub = rospy.Publisher('/gesture_detection_image', Image, queue_size=10)
        # rgb_image_sub = rospy.Subscriber("/spencer/sensors/rgbd_front_top/color/image_raw", Image, self.image_callback)

        rgb_image_sub = message_filters.Subscriber("/spencer/sensors/rgbd_front_top/color/image_raw", Image)
        # depth_image_sub = message_filters.Subscriber('/spencer/sensors/rgbd_front_top/depth/image_rect_raw', Image)
        
        depth_image_sub = message_filters.Subscriber('/spencer/sensors/rgbd_front_top/aligned_depth_to_color/image_raw', Image)

        cmd_vel_sub = message_filters.Subscriber('/tb_control/wheel_odom', Odometry)
        self.time_sync = message_filters.ApproximateTimeSynchronizer([rgb_image_sub, depth_image_sub, cmd_vel_sub], 10, 1)
        # self.time_sync = message_filters.ApproximateTimeSynchronizer([rgb_image_sub, depth_image_sub], 5, 0.2)

        self.time_sync.registerCallback(self.image_callback)

        self.gesture_pub = rospy.Publisher('/gesture_recognition_result', Header, queue_size=10)

        self.distance_threshold = 450
        self.conf_threshold = 0.7
        self.last_gesture = None
        self.last_detect_time = 0
        self.timeout = rospy.Duration(3)
Example #19
    throttle_np = np.zeros((1, ))
    throttle_np[0] = throttle.pedal_cmd
    throttle_list.append(throttle_np)

    steering_np = np.zeros((1, ))
    steering_np[0] = steering.steering_wheel_angle_cmd
    steering_list.append(steering_np)

    brake_np = np.zeros((1, ))
    brake_np[0] = brake.pedal_cmd
    brake_list.append(brake_np)


if __name__ == '__main__':
    img_sub = message_filters.Subscriber(
        '/vehicle/front_camera/image_rect_color', Image)
    pc2_sub = message_filters.Subscriber('/vehicle/velodyne_points',
                                         PointCloud2)
    imu_sub = message_filters.Subscriber('/vehicle/imu/data_raw', Imu)
    cmd_vel_sub = message_filters.Subscriber('/vehicle/cmd_vel', Twist)
    throttle_sub = message_filters.Subscriber('/vehicle/throttle_cmd',
                                              ThrottleCmd)
    steering_sub = message_filters.Subscriber('/vehicle/steering_cmd',
                                              SteeringCmd)
    brake_sub = message_filters.Subscriber('/vehicle/brake_cmd', BrakeCmd)

    ts = message_filters.ApproximateTimeSynchronizer([
        img_sub, pc2_sub, imu_sub, cmd_vel_sub, throttle_sub, steering_sub,
        brake_sub
    ],
                                                     10,
Example #20
    cv_img_wv5 = cv.cvtColor(cv_img_wv5, cv.COLOR_BGR2GRAY)

    #Fusion
    cv_img_merged = np.zeros(cv_img_wv1.shape)
    cv.accumulate(cv_img_wv1, cv_img_merged)
    cv.accumulate(cv_img_wv2, cv_img_merged)
    cv.accumulate(cv_img_wv3, cv_img_merged)
    cv.accumulate(cv_img_wv4, cv_img_merged)
    cv.accumulate(cv_img_wv5, cv_img_merged)
    cv_img_merged = cv_img_merged/5
    cv_img_merged = cv_img_merged.astype(np.uint8)

    #Publish
    message = bridge.cv2_to_imgmsg(cv_img_merged, encoding="mono8")
    message.header.stamp = stamp

    pub.publish(message)

rospy.init_node('HyperspectralFusion')  # initialize the node before wiring up topics

image_sub_wv1 = message_filters.Subscriber('/camera/image_wavelength_0', Image) #644
image_sub_wv2 = message_filters.Subscriber('/camera/image_wavelength_1', Image) #653
image_sub_wv3 = message_filters.Subscriber('/camera/image_wavelength_9', Image) #661
image_sub_wv4 = message_filters.Subscriber('/camera/image_wavelength_10', Image) #607
image_sub_wv5 = message_filters.Subscriber('/camera/image_wavelength_20', Image) #701

pub = rospy.Publisher('/camera/image_hyperspec', Image, queue_size=10)
print('initialized')
ts = message_filters.ApproximateTimeSynchronizer([image_sub_wv1, image_sub_wv2, image_sub_wv3, image_sub_wv4, image_sub_wv5], 10, 0.1, allow_headerless=False)
ts.registerCallback(callback)
rospy.spin()
Example #21
    def __init__(self, ctx, output_world, mesh_dir, reference_frame):
        self.human_cameras_ids = {}
        self.ctx = ctx
        self.human_bodies = {}
        self.target = ctx.worlds[output_world]
        self.target_world_name = output_world
        self.reference_frame = reference_frame
        self.mesh_dir = mesh_dir
        self.human_meshes = {}
        self.human_aabb = {}

        self.nb_gaze_detected = {}
        self.human_perception_to_uwds_ids = {}

        self.detection_time = None
        self.reco_durations = []
        self.record_time = False

        self.robot_name = rospy.get_param("robot_name", "pepper")
        self.is_robot_moving = rospy.get_param("is_robot_moving", False)

        self.ros_pub = {"voice": rospy.Publisher("multimodal_human_monitor/voice_attention_point", PointStamped, queue_size=5),
                        "gaze": rospy.Publisher("multimodal_human_monitor/gaze_attention_point", PointStamped, queue_size=5),
                        "reactive": rospy.Publisher("multimodal_human_monitor/monitoring_attention_point", PointStamped, queue_size=5),
                        "monitoring_attention_point": rospy.Publisher("head_manager/head_monitoring_target", TargetWithPriority, queue_size=5),
                        "tf": rospy.Publisher("/tf", TFMessage, queue_size=10)}

        self.log_pub = {"isLookingAt": rospy.Publisher("predicates_log/lookingat", String, queue_size=5),
                        "isPerceiving": rospy.Publisher("predicates_log/perceiving", String, queue_size=5),
                        "isSpeaking": rospy.Publisher("predicates_log/speak", String, queue_size=5),
                        "isSpeakingTo": rospy.Publisher("predicates_log/speakingto", String, queue_size=5),
                        "isNear": rospy.Publisher("predicates_log/near", String, queue_size=5),
                        "isClose": rospy.Publisher("predicates_log/close", String, queue_size=5),
                        "isMonitoring": rospy.Publisher("predicates_log/monitoring", String, queue_size=5)}

        self.ros_sub = {"gaze": message_filters.Subscriber("wp2/gaze", GazeInfoArray),
                        "voice": message_filters.Subscriber("wp2/voice", VoiceActivityArray),
                        "person": message_filters.Subscriber("wp2/track", TrackedPersonArray)}

        self.ros_services = {"monitor_humans": rospy.Service("multimodal_human_monitor/monitor_humans", MonitorHumans, self.handle_monitor_humans),
                             "find_alternate_id": rospy.Service("multimodal_human_monitor/find_alternate_id", FindAlternateId, self.handle_find_alternate_id),
                             "global_monitoring": rospy.Service("multimodal_human_monitor/global_monitoring", SetBool, self.handle_global_monitoring)}

        self.ts = message_filters.TimeSynchronizer([self.ros_sub["gaze"], self.ros_sub["voice"], self.ros_sub["person"]], 50)

        self.ts.registerCallback(self.callback)

        self.head_signal_dq = deque()

        self.already_removed_nodes = []

        self.current_situations_map = {}

        self.humans_to_monitor = []

        self.reco_id_table = {}
        self.inv_reco_id_table = {}

        self.human_perceived = []
        self.previous_human_perceived = []

        self.human_speaking = []
        self.previous_human_speaking = []

        self.human_speakingto = {}
        self.previous_human_speakingto = {}

        self.human_lookat = {}
        self.previous_human_lookat = {}

        self.human_distances = {}
        self.human_close = []
        self.previous_human_close = []
        self.human_near = []
        self.previous_human_near = []

        self.is_active = True

        self.tf_buffer = tf2_ros.Buffer(rospy.Duration(TF_CACHE_TIME), debug=False)
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)

        try:
            nodes_loaded = ModelLoader().load(self.mesh_dir + "face.blend", self.ctx,
                                              world=output_world, root=None, only_meshes=True,
                                              scale=1.0)
            for n in nodes_loaded:
                if n.type == MESH:
                    self.human_meshes["face"] = n.properties["mesh_ids"]
                    self.human_aabb["face"] = n.properties["aabb"]
        except Exception as e:
            rospy.logwarn("[multimodal_human_provider] Exception occurred with %s : %s" % (self.mesh_dir + "face.blend", str(e)))
Example #22
					# Point(odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z), 
					# Vector3(*vel / 5), 
					Vector3(0, 0, 0),
					# Vector3(*acc / 5),
					Vector3(0, 0, 0),
					yaw, 
					# 0,
					# pos_cmd[10] / 5,
					0,
					[0, 0, 0], [0, 0, 0], id, PositionCommand.TRAJECTORY_STATUS_READY)
				
				pos_cmd_publisher.publish(pos_cmd)
				rospy.loginfo("[POS CMD: pos_cmd\ {} n".format(pos_cmd))
			id += 1

rospy.init_node('playback_node')

rospy.loginfo("started node\n")

image_sub = message_filters.Subscriber('/pcl_render_node/depth', SensorImage)
# pos_cmd_sub = message_filters.Subscriber('/planning/pos_cmd', PositionCommand)
odom_sub = message_filters.Subscriber('/visual_slam/odom', Odometry)

# rospy.Subscriber("/visual_slam/odom", Odometry, callback, buff_size=10)
rospy.Subscriber("/move_base_simple/goal", PoseStamped, goalCallback, buff_size=10)

# ts = message_filters.ApproximateTimeSynchronizer([image_sub, odom_sub], 5, 0.1, allow_headerless=True)
ts = message_filters.ApproximateTimeSynchronizer([image_sub, odom_sub], 100, 1)
ts.registerCallback(callback)

rospy.spin()
Example #23
    def __init__(self):
        """
            Constructor.
        """
        # Detection target ID (person)
        self.target = 15

        # visitor detected
        self.visitor = 0
        self.confirm_attempts = 0

        # Minimum confidence for acceptable detections
        self.confidence = 0.5

        # Converted depth_image
        self.depth_image = None

        # Publishing rate
        self.rate = rospy.Rate(10)

        # Number of detections
        self.number_of_detections = 0

        # Detection messages
        self.detections = Detections()

        # Constant path
        self.path = str(
            Path(os.path.dirname(os.path.abspath(__file__))).parents[0])

        # Define detection's target/s
        self.targets = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        # Bounding boxes self.colours
        self.colours = np.random.uniform(0, 255, size=(len(self.targets), 3))

        # Load the neural network serialised model
        self.net = cv2.dnn.readNetFromCaffe(
            self.path + "/data/nn_params/MobileNetSSD_deploy.prototxt.txt",
            self.path + "/data/nn_params/MobileNetSSD_deploy.caffemodel")

        # Distance (human-robot distance) and detection publishers
        self.detection_pub = rospy.Publisher('detections',
                                             Detections,
                                             queue_size=1)

        print("[INFO] Successful DNN Initialisation")

        # Subscriptions (via Subscriber package)
        rgb_sub = message_filters.Subscriber(
            "/xtion/rgb/image_rect_color", Image,
            queue_size=None)  ## sam added queue size = None ##
        # depth_sub = message_filters.Subscriber("/xtion/depth_registered/hw_registered/image_rect_raw", Image)
        depth_sub = message_filters.Subscriber(
            "/xtion/depth_registered/image_raw", Image,
            queue_size=None)  ## sam added queue size = None ##

        # Synchronize subscriptions
        ats = message_filters.ApproximateTimeSynchronizer(
            [rgb_sub, depth_sub], queue_size=1,
            slop=0.1)  ## sam changed queue size from 5 and slop from 0.5
        ats.registerCallback(self.processSubscriptions)
Example #24
        if (name == target_model_name):
            break
        idx += 1
    # Get pose:
    current_pose = all_states.pose[idx]
    x = goal.position.x
    y = goal.position.y
    z = goal.position.z
    rx = goal.orientation.x
    ry = goal.orientation.y
    rz = goal.orientation.z
    rw = goal.orientation.w


rospy.init_node("p2p_move", anonymous=True)
state_sub = message_filters.Subscriber("/gazebo/model_states", ModelStates)
goal_sub = message_filters.Subscriber("/get_goal", Pose)
# goal_sub = message_filters.Subscriber("husky/get_goal", Pose)
ts = message_filters.ApproximateTimeSynchronizer([state_sub, goal_sub],
                                                 10,
                                                 0.1,
                                                 allow_headerless=True)
ts.registerCallback(callback)
# rospy.Subscriber("/gazebo/model_states", ModelStates, callback)
pub = rospy.Publisher("/gazebo/set_model_state", ModelState, queue_size=10)
target_pose = Pose()
target_state = ModelState()
velocity = 0.1
angular_velocity = 0.2
rospy.loginfo("Ready")
rate = rospy.Rate(100)
Example #25
        json.dump(mpu9150_6_twist, open('./data/mpu9150_6_twist.txt','w'))
        json.dump(mpu9150_7_twist, open('./data/mpu9150_7_twist.txt','w'))
        json.dump(mpu9150_8_twist, open('./data/mpu9150_8_twist.txt','w'))
        json.dump(mpu9150_9_twist, open('./data/mpu9150_9_twist.txt','w'))
        json.dump(mpu9150_10_twist, open('./data/mpu9150_10_twist.txt','w'))
        json.dump(mpu9150_11_twist, open('./data/mpu9150_11_twist.txt','w'))
        json.dump(mpu9150_12_twist, open('./data/mpu9150_12_twist.txt','w'))
        json.dump(mpu9150_13_twist, open('./data/mpu9150_13_twist.txt','w'))
        json.dump(mpu9150_14_twist, open('./data/mpu9150_14_twist.txt','w'))
        json.dump(mpu9150_15_twist, open('./data/mpu9150_15_twist.txt','w'))
    counter = counter - 1


rospy.init_node('mpu9150')

mpu9150_0_sub = message_filters.Subscriber('mpu9150_0', Imu)
mpu9150_1_sub = message_filters.Subscriber('mpu9150_1', Imu)
mpu9150_2_sub = message_filters.Subscriber('mpu9150_2', Imu)
mpu9150_3_sub = message_filters.Subscriber('mpu9150_3', Imu)
mpu9150_4_sub = message_filters.Subscriber('mpu9150_4', Imu)
mpu9150_5_sub = message_filters.Subscriber('mpu9150_5', Imu)
mpu9150_6_sub = message_filters.Subscriber('mpu9150_6', Imu)
mpu9150_7_sub = message_filters.Subscriber('mpu9150_7', Imu)
mpu9150_8_sub = message_filters.Subscriber('mpu9150_8', Imu)
mpu9150_9_sub = message_filters.Subscriber('mpu9150_9', Imu)
mpu9150_10_sub = message_filters.Subscriber('mpu9150_10', Imu)
mpu9150_11_sub = message_filters.Subscriber('mpu9150_11', Imu)
mpu9150_12_sub = message_filters.Subscriber('mpu9150_12', Imu)
mpu9150_13_sub = message_filters.Subscriber('mpu9150_13', Imu)
mpu9150_14_sub = message_filters.Subscriber('mpu9150_14', Imu)
mpu9150_15_sub = message_filters.Subscriber('mpu9150_15', Imu)
Example #26
    def __init__(self):
        rospy.init_node('mapping_node')

        # Mapping Constants
        self.origin = Pose()
        self.origin.position.x = 0.
        self.origin.position.y = 0.
        self.origin.orientation.w = 1.
        self.map_info = MapMetaData()
        self.map_info.map_load_time = rospy.Time.now()
        self.map_info.resolution = .05  # This matches the resolution of gmapping
        self.map_info.height = 746
        self.map_info.width = 775
        self.map_info.origin = self.origin

        # Sensor Model - certainty. When using no noise in lidar, keep these high
        #self.alpha = 0.9
        #self.beta = 0.7
        self.alpha = .999
        self.beta = .999

        # To stabilize our graph
        self.start_wait = rospy.get_time()
        self.twist = Twist()

        # Transform(s)
        # sensor to robot
        self.T_sensor = np.array([[np.cos(0), -1 * np.sin(0), 0.0],
                                  [np.sin(0), np.cos(0), 0.0], [0.0, 0.0,
                                                                1.0]])

        # robot to world
        self.T_robot = np.array([[np.cos(0), -1 * np.sin(0), 0.0],
                                 [np.sin(0), np.cos(0), 0.0], [0.0, 0.0, 1]])

        # occ_grid is a numpy matrix of probabilities between 0 and 1
        # Prefill with 0.5 for unknown occupancy
        self.occ_grid = np.repeat(np.matrix(np.repeat(.5,
                                                      self.map_info.width)),
                                  self.map_info.height,
                                  axis=0)

        # ros_occ_grid is a ROS OccupancyGrid instance
        self.ros_occ_grid = OccupancyGrid()
        self.ros_occ_grid.info = self.map_info

        # Publishers
        self.map_pub = rospy.Publisher('/my_static_map',
                                       OccupancyGrid,
                                       queue_size=1)

        # Subscribers
        # pose_sub and scan_sub are synchronized.
        # when the updateMap callback is executed it receives a message from both.
        self.pose_sub = message_filters.Subscriber('/my_kalman_filter',
                                                   PoseStamped)
        self.scan_sub = message_filters.Subscriber('/robot0/scan', LaserScan)
        self.ts = message_filters.ApproximateTimeSynchronizer(
            [self.pose_sub, self.scan_sub], 10, 0.1)
        self.ts.registerCallback(self.updateMap)

        # Subscribe to twist because we don't want to map when we are turning
        self.twist_sub = rospy.Subscriber('robot0/input_vel', Twist,
                                          self.updateTwist)

        rospy.spin()
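
The class keeps occ_grid as probabilities in [0, 1] but publishes a nav_msgs/OccupancyGrid, whose data field expects integers 0-100 (with -1 for unknown). A sketch of the conversion the updateMap callback would need before self.map_pub.publish(...) (an assumption, since the callback body is not shown):

import numpy as np

def to_ros_data(occ_grid):
    # map probabilities [0, 1] to OccupancyGrid integers [0, 100];
    # cells still at the 0.5 prior could instead be reported as -1 (unknown)
    return (np.asarray(occ_grid) * 100).astype(np.int8).flatten().tolist()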
Example #27
    def __init__(self):
        """Create subscribers and publishers."""

        # Get parameters and initialize class variables.
        self.num_agents = rospy.get_param('/num_of_robots')
        robot_name = rospy.get_param('~robot_name')
        using_sim_kalman = False
        if rospy.get_param('/run_type') == 'sim':
            using_sim_kalman = rospy.get_param('/use_kalman')
            if not using_sim_kalman and rospy.get_param(
                    '/ctrl_loop_freq') != rospy.get_param('/data_stream_freq'):
                rospy.logwarn(
                    'When not using Kalman filter in simulation, ctrl_loop_freq and '
                    'data_stream_freq must be the same! Change these parameters in '
                    '`setup_sim.launch` and interval_sim in `definitions.inc`!'
                )

        # Create publishers for commands
        pub_keys = [
            robot_name + '_{}'.format(i) for i in range(self.num_agents)
        ]

        # Publisher for locations of nearest agents
        self.nearest = dict.fromkeys(pub_keys)
        for key in self.nearest.keys():
            self.nearest[key] = rospy.Publisher('/' + key + '/nearest',
                                                OdometryArray,
                                                queue_size=1)

        # Publisher for locations of walls and obstacles
        self.avoid = dict.fromkeys(pub_keys)
        for key in self.avoid.keys():
            self.avoid[key] = rospy.Publisher('/' + key + '/avoid',
                                              PoseArray,
                                              queue_size=1)

        # Create subscribers
        rospy.Subscriber('/map',
                         OccupancyGrid,
                         self.map_callback,
                         queue_size=1)
        rospy.sleep(0.5)  # Wait for first map_callback to finish
        rospy.Subscriber('/dyn_reconf/parameter_updates',
                         Config,
                         self.param_callback,
                         queue_size=1)
        self.param_callback(None)

        if using_sim_kalman:
            topic_name = '/' + robot_name + '_{}/odom_est'
        else:
            topic_name = '/' + robot_name + '_{}/odom'

        subs = [
            mf.Subscriber(topic_name.format(i), Odometry)
            for i in range(self.num_agents)
        ]
        self.ts = mf.ApproximateTimeSynchronizer(
            subs, 10, 0.11)  # TODO: set this to be parametric as well
        self.ts.registerCallback(self.robot_callback)

        # Keep program from exiting
        rospy.spin()
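
    # The robot_callback registered above is not part of this excerpt. A
    # minimal sketch, assuming robot_name was also saved as self.robot_name,
    # that a search radius is set by param_callback, and that OdometryArray
    # carries an `array` field of Odometry messages (all three are
    # assumptions, not facts from the original):
    def robot_callback(self, *msgs):
        for i, me in enumerate(msgs):
            nearest = OdometryArray()
            nearest.array.append(me)  # own odometry first
            for j, other in enumerate(msgs):
                if i == j:
                    continue
                dx = other.pose.pose.position.x - me.pose.pose.position.x
                dy = other.pose.pose.position.y - me.pose.pose.position.y
                if (dx * dx + dy * dy) ** 0.5 <= self.search_radius:
                    nearest.array.append(other)
            self.nearest[self.robot_name + '_{}'.format(i)].publish(nearest)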
Example #28
0
    print("...ok")

    runCommandClient = rospy.ServiceProxy('run_command', RunCommand)
    runCommandStartDynamicGraph = rospy.ServiceProxy('start_dynamic_graph', Empty)

    initCode = open( "SotInitSim.py", "r").read().split("\n")
    
    print("Stack of Tasks launched")

    launchScript(initCode,'initialize SoT')

    # Check table distance
    rospy.init_node('TablePrehension', anonymous=True)

    global subRH
    subRH = message_filters.Subscriber("cmd_right_hand_sim", PointStamped) 

    global subLH
    subLH = message_filters.Subscriber("cmd_left_hand_sim", PointStamped)

    ts = message_filters.TimeSynchronizer([subRH, subLH], 1)
    ts.registerCallback(callbackCheck)
    
    # If there is no issue, press Enter to start
    raw_input()  # Python 2 built-in; blocks until the operator confirms
    runCommandStartDynamicGraph()

    print("...done")

    time.sleep(2)
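
# The callbackCheck registered above is defined elsewhere. A minimal sketch,
# assuming its job is only to warn when the two synchronized hand targets get
# too close (the 0.05 m threshold is illustrative, not from the original):
def callbackCheck(right_msg, left_msg):
    dx = right_msg.point.x - left_msg.point.x
    dy = right_msg.point.y - left_msg.point.y
    dz = right_msg.point.z - left_msg.point.z
    if (dx * dx + dy * dy + dz * dz) ** 0.5 < 0.05:
        rospy.logwarn('Hand targets are closer than 0.05 m')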
Example #29
0
    def __init__(self):
        self.node_name = "Obstacle Detection Node"
        robot_name = rospy.get_param("~veh", "")
        # robot_name = "dori"
        self.show_marker = rospy.get_param("~show_marker", "")
        # self.show_marker=True
        self.show_image = rospy.get_param("~show_image", "")
        # bounding box parameters in mm
        self.show_bb = rospy.get_param("~show_bb", "")
        self.bb_len = rospy.get_param("~bb_len", "")
        self.bb_wid = rospy.get_param("~bb_wid", "")
        print('Here comes bb_len')
        print(self.bb_len)

        # Load camera calibration parameters
        self.intrinsics = load_camera_intrinsics(robot_name)
        self.visualizer = Visualizer(robot_name=robot_name)

        # Create Publishers
        if (self.show_marker):
            self.pub_topic_marker = '/{}/obst_detect_visual/visualize_obstacles'.format(
                robot_name)
            self.publisher_marker = rospy.Publisher(self.pub_topic_marker,
                                                    MarkerArray,
                                                    queue_size=1)
            print "YEAH I GIVE YOU THE MARKER"

        if (self.show_image):
            self.pub_topic_img = '/{}/obst_detect_visual/image/compressed'.format(
                robot_name)
            self.publisher_img = rospy.Publisher(self.pub_topic_img,
                                                 CompressedImage,
                                                 queue_size=1)
            print "YEAH I GIVE YOU THE IMAGE"

        # if (self.show_bb):
        self.pub_topic_bb_linelist = '/{}/obst_detect_visual/bb_linelist'.format(
            robot_name)
        self.publisher_bblinelist = rospy.Publisher(self.pub_topic_bb_linelist,
                                                    Marker,
                                                    queue_size=1)
        print "YEAH I GIVE YOU THE BOUNDINGBOXMARKERLIST"

        self.bbmarker = Marker()
        self.bbmarker.header.frame_id = '{}'.format(robot_name)
        # self.bbmarker.ns = "points_and_lines"
        # self.bbmarker.id = 0;
        self.bbmarker.action = Marker.ADD
        self.bbmarker.type = Marker.LINE_STRIP
        self.bbmarker.lifetime = rospy.Duration(10.0)  # lifetime is a Duration, not a Time
        self.bbmarker.pose.orientation.w = 1.0
        self.bbmarker.scale.x = 0.02
        self.bbmarker.color.b = 1.0  # blue
        self.bbmarker.color.a = 1.0  # alpha

        corner1 = Point()
        corner2 = Point()
        corner3 = Point()
        corner4 = Point()
        corner1.y = -self.bb_wid / 2000.0  # mm -> m, half width
        corner1.x = 0.0
        corner2.y = -self.bb_wid / 2000.0
        corner2.x = self.bb_len / 1000.0  # mm -> m
        corner3.y = self.bb_wid / 2000.0
        corner3.x = self.bb_len / 1000.0
        corner4.y = self.bb_wid / 2000.0
        corner4.x = 0.0

        self.bbmarker.points.append(corner1)
        self.bbmarker.points.append(corner2)
        self.bbmarker.points.append(corner3)
        self.bbmarker.points.append(corner4)
        self.bbmarker.points.append(corner1)

        # for i in range(1,10000):
        #    self.publisher_bblinelist.publish(self.bbmarker)

        self.sub_topic_arr = '/{}/obst_detect/posearray'.format(robot_name)
        self.subscriber_arr = message_filters.Subscriber(
            self.sub_topic_arr, PoseArray)
        # The pose array subscription is always required.
        if (self.show_image):
            self.sub_topic = '/{}/camera_node/image/compressed'.format(
                robot_name)
            self.subscriber = message_filters.Subscriber(
                self.sub_topic, CompressedImage)

        if (self.show_marker and not (self.show_image)):
            self.subscriber_arr.registerCallback(self.marker_only_callback)
        else:
            self.ts = message_filters.TimeSynchronizer(
                [self.subscriber_arr, self.subscriber], 100)
            self.ts.registerCallback(self.callback)
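
    # Neither registered callback appears in this excerpt. A minimal sketch of
    # marker_only_callback, assuming the Visualizer exposes a hypothetical
    # visualize_marker(pose_array) -> MarkerArray method (an assumption, not
    # the package's actual API):
    def marker_only_callback(self, obst_list):
        self.publisher_bblinelist.publish(self.bbmarker)  # refresh the box
        marker_array = self.visualizer.visualize_marker(obst_list)
        self.publisher_marker.publish(marker_array)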
Example #30
0
    ## Publish this transform to the tf broadcaster
    tf_br.sendTransform((0.0, 0.0, 0.5), (q[0], q[1], q[2], q[3]), Time.now(),
                        'base_link', 'world')

    # publish the roll pitch yaw topic
    stamped_msg = SensorMsgStamped()
    stamped_msg.data = [rollA, pitchA, yawM]
    stamped_msg.header.stamp = Time.now()  # nsecs is already integer nanoseconds; no 1e-9 scaling needed

    rpyAM_pub_stamped.publish(stamped_msg)


if __name__ == "__main__":
    try:
        mag_sub = message_filters.Subscriber("/Magneto_topic_stamped",
                                             SensorMsgStamped)
        accel_sub = message_filters.Subscriber("/Accel_topic_stamped",
                                               SensorMsgStamped)
        combined_sub = message_filters.ApproximateTimeSynchronizer(
            [accel_sub, mag_sub], queue_size=5, slop=0.05)
        combined_sub.registerCallback(got_accel_mag)
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
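
# The publishing code at the top of this example is the tail of got_accel_mag;
# the roll/pitch/yaw computation that precedes it is not shown here. A minimal
# sketch of that step, assuming raw [x, y, z] readings (a hypothetical helper,
# not the original code; sign conventions vary between IMU mountings):
from math import atan2, cos, sin, sqrt

def rpy_from_accel_mag(ax, ay, az, mx, my, mz):
    rollA = atan2(ay, az)                         # roll from gravity
    pitchA = atan2(-ax, sqrt(ay * ay + az * az))  # pitch from gravity
    # Tilt-compensate the magnetometer, then take the heading.
    mxh = mx * cos(pitchA) + mz * sin(pitchA)
    myh = (mx * sin(rollA) * sin(pitchA) + my * cos(rollA)
           - mz * sin(rollA) * cos(pitchA))
    yawM = atan2(-myh, mxh)
    return rollA, pitchA, yawM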

########################
# TO UNDERSTAND FUSION BETWEEN ACCELEROMETER AND MAGNETOMETER
# Author : Kartik Prakash
# Date   : 7/Mar/2020
# STEPS:
# 1. Get the raw accel values and the magnetometer values