Example #1
def main(unused_argv):

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()
    info = None
    #agent = teleop_agent(env)
    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []
    reward = 0
    done = False

    plt.ion()
    im_color = obs['color'][0]
    im_depth = obs['depth'][0]
    img_draw = plt.imshow(im_depth, interpolation='nearest')

    agent = random_action_agent(env)

    f = open(model_save_rootdir + "rollout_log.txt", "w+")
    episode_steps = 0
    n_episode = 1
    while not rospy.is_shutdown():

        #p.stepSimulation()
        #p.getCameraImage(480, 320)
        keys = p.getKeyboardEvents()
        if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
            print("reset env")
            episode = []

            obs = env.reset()

        action = agent.act()
        #if action != None:
        #    print(action)
        # step the simulation 30 times per teleop tick
        for _ in range(30):
            obs, reward, done, info, aux = env.step_simple(action,
                                                           use_aux=True)
        state = get_state_ground_truth(env)
        print(state)
        print(aux)
        env.reset()

        rate.sleep()
Example #2
    def __init__(self):

        self.rate = rospy.Rate(rospy.get_param("~frequency", 20))
        self.Ncars = rospy.get_param("/num_cars", 3)
        self.car_id = rospy.get_param("~car_id", 0)
        self.frame_name = rospy.get_param("/frame_name")
        self.frame_id = self.frame_name[self.car_id]
        self.id_dict = rospy.get_param("/id_dict", None)
        self.connections = rospy.get_param("/connections", None)
        self.own_connections = self.connections[str(self.car_id)]
        self.Nconn = len(self.own_connections)

        self.full_graph = dict_to_graph.convert(self.connections)
        self.graph = dict_to_graph.prune(self.full_graph, self.car_id)

        self.meas = CarMeasurement()
        self.meas.header = Header()
        self.meas.header.frame_id = self.frame_id
        self.meas.car_id = self.car_id

        self.uwb_ranges = self.init_uwb()
        self.gps = [None] * self.Nconn
        self.control = [None] * self.Nconn
        self.lidar = [None] * self.Nconn
        self.first_time = True

        self.br = tf.TransformBroadcaster()
        self.debug = MeasurementDebug()
        self.debug.header.frame_id = self.frame_id
        self.debug_pub = rospy.Publisher("meas_debug",
                                         MeasurementDebug,
                                         queue_size=1)
        self.meas_pub = rospy.Publisher("measurements",
                                        CarMeasurement,
                                        queue_size=1)

        self.gps_sub = []
        self.control_sub = []
        self.control_sub2 = []
        self.lidar_sub = []
        # in case sensor data is broadcast on global topics
        self.uwb_sub = rospy.Subscriber("/ranges",
                                        UWBRange,
                                        self.range_cb,
                                        queue_size=1)
        self.uwb_sub2 = []
        # the control topic is currently gathered for fake cars, but not for real ones
        self.control_sub.append(
            rospy.Subscriber("/control",
                             CarControl,
                             self.control_cb,
                             queue_size=1))
        self.lidar_sub.append(
            rospy.Subscriber("/lidar_pose",
                             LidarPose,
                             self.lidar_cb,
                             queue_size=1))
        # for sensor data broadcast in car namespaces
        for i, ID in enumerate(self.own_connections):
            self.gps_sub.append(
                rospy.Subscriber("odom" + str(ID),
                                 SimplePose,
                                 self.gps_cb, (i, ),
                                 queue_size=1))
            self.gps_sub.append(
                rospy.Subscriber("/car" + str(ID) + "/odom",
                                 Odometry,
                                 self.odom_cb, (ID, i),
                                 queue_size=1))
            self.control_sub.append(
                rospy.Subscriber("/car" + str(ID) + "/control",
                                 CarControl,
                                 self.control_cb,
                                 queue_size=1))
            self.lidar_sub.append(
                rospy.Subscriber("/car" + str(ID) + "/lidar_pose",
                                 LidarPose,
                                 self.lidar_cb,
                                 queue_size=1))
            self.lidar_sub.append(
                rospy.Subscriber("/car" + str(ID) + "/poseupdate",
                                 PoseWithCovarianceStamped,
                                 self.slam_cb, (ID, ),
                                 queue_size=1))
            self.uwb_sub2.append(
                rospy.Subscriber("/car" + str(ID) + "/ranges",
                                 UWBRange,
                                 self.range_cb,
                                 queue_size=1))
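
Several of the subscriptions above pass a tuple such as (i, ) as callback_args; rospy hands that tuple to the callback as its second argument. A minimal sketch of the matching callback shape (the body is an assumption, not the original):

    def gps_cb(self, msg, args):
        (i, ) = args  # index into the per-connection lists
        self.gps[i] = msg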
Example #3
    def __init__(self, pozyx, ranging_protocol, robot_list, tag_pos,
                 robot_number, alpha, noise, R, link_to_robot, do_ranging,
                 tf_prefix):
        self.pozyx = pozyx
        self.ranging_protocol = ranging_protocol
        self.tag_pos = tag_pos
        self.robot_number = robot_number
        self.link_to_robot = link_to_robot
        self.do_ranging = do_ranging
        self.tf_prefix = tf_prefix

        self.distance_1 = pzx.DeviceRange()
        self.distance_2 = pzx.DeviceRange()
        self.distance_3 = pzx.DeviceRange()
        self.distance_4 = pzx.DeviceRange()

        self.A = robot_list[robot_number]['left']
        self.B = robot_list[robot_number]['right']

        if robot_number == 1:
            self.C = robot_list[2]['left']
            self.D = robot_list[2]['right']
        elif robot_number == 2:
            self.C = robot_list[1]['left']
            self.D = robot_list[1]['right']

        self.distance_prev_1 = 0
        self.distance_prev_2 = 0
        self.distance_prev_3 = 0
        self.distance_prev_4 = 0

        self.f1 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
        self.f1.P = 1
        self.f1.H = np.array([[1.]])
        self.f1.F = np.array([[1.]])
        self.f1.B = np.array([[1.]])
        self.f1.Q = noise
        self.f1.R = R
        self.f1.alpha = alpha

        self.f2 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
        self.f2.P = 1
        self.f2.H = np.array([[1.]])
        self.f2.F = np.array([[1.]])
        self.f2.B = np.array([[1.]])
        self.f2.Q = noise
        self.f2.R = R
        self.f2.alpha = alpha

        self.f3 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
        self.f3.P = 1
        self.f3.H = np.array([[1.]])
        self.f3.F = np.array([[1.]])
        self.f3.B = np.array([[1.]])
        self.f3.Q = noise
        self.f3.R = R
        self.f3.alpha = alpha

        self.f4 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
        self.f4.P = 1
        self.f4.H = np.array([[1.]])
        self.f4.F = np.array([[1.]])
        self.f4.B = np.array([[1.]])
        self.f4.Q = noise
        self.f4.R = R
        self.f4.alpha = alpha

        self.f5 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
        self.f5.P = 1
        self.f5.H = np.array([[1.]])
        self.f5.F = np.array([[1.]])
        self.f5.B = np.array([[1.]])
        self.f5.Q = 0.001
        self.f5.R = 0.3
        self.f5.alpha = 1

        self.pozyx.setRangingProtocol(self.ranging_protocol)
        self.br = tf.TransformBroadcaster()
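
The four per-anchor filters above are configured identically; a small factory would keep that setup in one place. A minimal refactoring sketch, assuming the filterpy-style KalmanFilter API used above:

from filterpy.kalman import KalmanFilter  # assumption: the KalmanFilter used above
import numpy as np

def make_range_filter(noise, R, alpha):
    # 1-D filter matching the f1..f4 setup above
    f = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
    f.P = 1
    f.H = np.array([[1.]])
    f.F = np.array([[1.]])
    f.B = np.array([[1.]])
    f.Q = noise
    f.R = R
    f.alpha = alpha  # fading-memory coefficient
    return f

# self.f1, self.f2, self.f3, self.f4 = [make_range_filter(noise, R, alpha) for _ in range(4)]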
Example #4
#!/usr/bin/env python

import roslib
roslib.load_manifest('subt_rviz_plugins')
from sensor_msgs.msg import LaserScan
import rospy
from math import cos, sin
import tf

rospy.init_node('test_scan')

topic = 'scan'
publisher = rospy.Publisher(topic, LaserScan, queue_size=5)

br = tf.TransformBroadcaster()
rate = rospy.Rate(10)
radius = 5
angle = 0

dist = 3
num_readings = 100
while not rospy.is_shutdown():

    scan = LaserScan()
    scan.header.frame_id = "/base_link"
    scan.header.stamp = rospy.Time.now()

    scan.angle_min = -3.14159274101
    scan.angle_max = 3.14159274101
    scan.angle_increment = (scan.angle_max - scan.angle_min) / num_readings
    scan.time_increment = 0.0
    scan.range_min = 0.04
    scan.range_max = 10.0
    # constant-range readings, just to have something to visualize
    scan.ranges = [dist] * num_readings

    publisher.publish(scan)

    # move the laser frame in a circle so the scan is animated in rviz
    br.sendTransform((radius * cos(angle), radius * sin(angle), 0.0),
                     tf.transformations.quaternion_from_euler(0, 0, angle),
                     rospy.Time.now(), "base_link", "map")
    angle += 0.1
    rate.sleep()
Example #5
def bcast(tran_vec, rot_quaternion, parent_frame, child_frame):
    br = tf.TransformBroadcaster()
    br.sendTransform((tran_vec[0], tran_vec[1], tran_vec[2]), rot_quaternion,
                     rospy.Time.now(), child_frame, parent_frame)
Example #6
def handle_turtle_pose(msg, turtlename):
    br = tf.TransformBroadcaster()
    br.sendTransform((msg.x, msg.y, 0),
                     tf.transformations.quaternion_from_euler(0, 0, msg.theta),
                     rospy.Time.now(), turtlename, "world")
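
handle_turtle_pose above follows the classic turtle_tf broadcaster pattern. A minimal sketch of how such a callback is typically wired up, with handle_turtle_pose from the example above in scope (the node, parameter, and topic names here are assumptions):

import rospy
import tf
from turtlesim.msg import Pose

if __name__ == '__main__':
    rospy.init_node('turtle_tf_broadcaster')
    turtlename = rospy.get_param('~turtle', 'turtle1')
    # the extra argument is passed through to the callback
    rospy.Subscriber('/%s/pose' % turtlename, Pose, handle_turtle_pose, turtlename)
    rospy.spin()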
Example #7
if __name__ == '__main__':
    # Don't change this code####################################################################################
    rospy.init_node('sample_node')

    ####################################################################################################################
    relaxedIK = RelaxedIK.init_from_config(config_file_name)
    ####################################################################################################################

    with open(relaxedIK.vars.urdf_path, 'r') as urdf_file:
        urdf_string = urdf_file.read()
    rospy.set_param('robot_description', urdf_string)
    js_pub = rospy.Publisher('joint_states', JointState, queue_size=5)
    ee_pose_goals_pub = rospy.Publisher('/relaxed_ik/ee_pose_goals',
                                        EEPoseGoals,
                                        queue_size=3)
    tf_pub = tf.TransformBroadcaster()

    rospy.sleep(0.3)

    # Don't change this code ###########################################################################################
    uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
    roslaunch.configure_logging(uuid)
    launch_path = os.path.dirname(
        __file__) + '/../launch/robot_state_pub.launch'
    launch = roslaunch.parent.ROSLaunchParent(uuid, [launch_path])
    launch.start()
    ####################################################################################################################

    rospy.sleep(1.0)

    counter = 0.0
Example #8
def amcl2robot_pose_pub():
    global tag_center_pose
    global amcl_pose
    global orient
    global startstop
    global th_laser

    rospy.init_node('amcl2robot_pose_PY')

    # subscribe to the topic containing the pose of the midpoint between the two tags
    sub_tag_in_map = rospy.Subscriber(tag_in_map_topic_ID, PoseStamped,
                                      vicon_cb_0)
    sub_amcl_pose = rospy.Subscriber("amcl_pose", PoseWithCovarianceStamped,
                                     vicon_cb_1)
    sub_scan = rospy.Subscriber("/scan", LaserScan, lidar_cb)
    sub_start_stop = rospy.Subscriber("/start_and_stop", Float64, motori_cb)

    robot_pose_pub = rospy.Publisher("/robot_pose", Point, queue_size=100)
    pub_start_stop = rospy.Publisher("/start_and_stop",
                                     Float64,
                                     queue_size=100)

    br_0 = tf.TransformBroadcaster()
    rate = rospy.Rate(10)  # loop throttle

    while not rospy.is_shutdown():

        # check how far the AMCL pose deviates from the UWB pose, all in the MAP frame

        amcl_x = amcl_pose.pose.pose.position.x
        amcl_y = amcl_pose.pose.pose.position.y
        amcl_orient = tf.transformations.euler_from_quaternion(
            (amcl_pose.pose.pose.orientation.x,
             amcl_pose.pose.pose.orientation.y,
             amcl_pose.pose.pose.orientation.z,
             amcl_pose.pose.pose.orientation.w))
        amcl_orient = amcl_orient[2]  # take the yaw

        # midpoint between the two tags
        tag_x = tag_center_pose.pose.position.x
        tag_y = tag_center_pose.pose.position.y
        tag_orient = tf.transformations.euler_from_quaternion(
            (tag_center_pose.pose.orientation.x,
             tag_center_pose.pose.orientation.y,
             tag_center_pose.pose.orientation.z,
             tag_center_pose.pose.orientation.w))
        tag_orient = tag_orient[2]  # take the yaw

        if (np.sum(scansione.intensities) <= th_laser):
            blind_laser = True
        else:
            blind_laser = False

        robot_pose = Point()

        if (blind_laser):

            #controllo_precedente = startstop
            #pub_start_stop.publish(0.0)

            robot_pose.x = tag_x
            robot_pose.y = tag_y
            robot_pose.z = tag_orient
            rospy.loginfo("Laser offline, navigazione UWB in corso")

        else:

            robot_pose.x = amcl_x
            robot_pose.y = amcl_y
            robot_pose.z = amcl_orient

        robot_pose_pub.publish(robot_pose)
        # -------------------------- TF --------------------------
        tf_quat = tf.transformations.quaternion_from_euler(0, 0, robot_pose.z)
        # publish the TF (broadcaster created once, before the loop)
        br_0.sendTransform((robot_pose.x, robot_pose.y, 0), tf_quat,
                           rospy.Time.now(), robot_pose_frame_ID, map_frame_ID)
        rate.sleep()
Example #9
def get_transform_lists(bag_file_name):
    init_node()
    broadcaster = tf.TransformBroadcaster()
    listener = tf.TransformListener()
    listener.setUsingDedicatedThread(True)
    checkerboard_in_camera_trans = []
    wrist_in_body_trans = []
    #pdb.set_trace()
    camera_in_body_estimate = None
    checkerboard_to_wrist_estimate = None
    bag = rosbag.Bag(bag_file_name)

    tf_header_ids = set()
    tf_child_ids = set()

    for topic, message, time in bag.read_messages(topics=['tf', '/tf']):

        for tf_message_stamped in message.transforms:
            tf_message = tf_message_stamped.transform
            translation = [
                tf_message.translation.x, tf_message.translation.y,
                tf_message.translation.z
            ]
            rotation = [
                tf_message.rotation.x, tf_message.rotation.y,
                tf_message.rotation.z, tf_message.rotation.w
            ]
            broadcaster.sendTransform(translation, rotation, rospy.Time.now(),
                                      tf_message_stamped.child_frame_id,
                                      tf_message_stamped.header.frame_id)
            tf_message_stamped.header.stamp = rospy.Time.now()
            listener.setTransform(tf_message_stamped, "user")

        for tf_message in message.transforms:
            if tf_message.header.frame_id not in tf_header_ids:
                tf_header_ids.add(tf_message.header.frame_id)
                print 'found new frame %s' % tf_message.header.frame_id

            if tf_message.child_frame_id not in tf_child_ids:
                tf_child_ids.add(tf_message.child_frame_id)
                print 'found new child frame %s' % tf_message.child_frame_id

            if 'wrist_board' in tf_message.header.frame_id or 'wrist_board_corner' in tf_message.child_frame_id:
                print 'found keyframe'
                if camera_in_body_estimate is None:
                    try:
                        listener.waitForTransform('/camera_link', '/world',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        camera_in_body_tf = listener.lookupTransform(
                            '/camera_link', '/world', rospy.Time(0))

                        print "got camera to world transform"
                        camera_in_body_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(camera_in_body_tf))
                    except:
                        print 'could not get camera to world transform, skipping. Are you sure you ran tf between camera_link and world?'
                        continue
                    print "got camera to world estimate"

                if checkerboard_to_wrist_estimate is None:
                    try:
                        listener.waitForTransform('/wrist_board',
                                                  '/wrist_board_corner',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        #(trans, rot) = listener.lookupTransform('/wrist_board','/wrist_board_corner',rospy.Time(0))
                        #print trans
                        #print rot
                        checkerboard_to_wrist_tf = listener.lookupTransform(
                            '/wrist_board', '/wrist_board_corner',
                            rospy.Time(0))
                        #print checkerboard_to_wrist_tf
                        #raw_input("press a key")

                        #checkerboard_to_wrist_tf = ((1.75, -0.75, -0.121),(-0.09, -0.04, 0.73, 0.66))
                        #print 'yinxiao test'

                        print "got wristboad in wrist"
                        checkerboard_to_wrist_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_to_wrist_tf))

                    except:
                        print 'could not get wristboard to wrist_board_corner, skipping'
                        continue
                    print "got wristboard in wrist estimate"
                try:
                    listener.waitForTransform('/wrist_board', '/camera_link',
                                              rospy.Time(0),
                                              rospy.Duration(.01))
                    listener.waitForTransform('/wrist_board_corner', '/world',
                                              rospy.Time(0),
                                              rospy.Duration(.1))

                    checkerboard_tf = listener.lookupTransform(
                        '/wrist_board', '/camera_link', rospy.Time(0))
                    #print "got wristboard in camera"

                    checkerboard_in_camera_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_tf)))

                    #print "got left wrist in world"
                    wrist_in_body_tf = listener.lookupTransform(
                        '/wrist_board_corner', '/world', rospy.Time(0))
                    wrist_in_body_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(wrist_in_body_tf)))
                except:
                    continue
                #print "finished loop"

    return checkerboard_in_camera_trans, wrist_in_body_trans, camera_in_body_estimate, checkerboard_to_wrist_estimate
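
The two lists of 4x4 matrices returned here are the standard inputs to a hand-eye calibration. A sketch of how they might be consumed with OpenCV's calibrateHandEye; the bag path and the frame conventions are assumptions:

import cv2
import numpy as np

cam_T_board, body_T_wrist, _, _ = get_transform_lists('calibration.bag')  # path is an assumption

# calibrateHandEye takes rotations and translations separately
R_gripper2base = [np.asarray(T)[:3, :3] for T in body_T_wrist]
t_gripper2base = [np.asarray(T)[:3, 3] for T in body_T_wrist]
R_target2cam = [np.asarray(T)[:3, :3] for T in cam_T_board]
t_target2cam = [np.asarray(T)[:3, 3] for T in cam_T_board]

R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
    R_gripper2base, t_gripper2base, R_target2cam, t_target2cam)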
Example #10
def talker():
    rospy.init_node('talker', anonymous=True)
    odom_pub = rospy.Publisher("odom1", Odometry, queue_size=50)
    map_pub = rospy.Publisher("map1", OccupancyGrid, queue_size=10)

    odom_broadcaster = tf.TransformBroadcaster()
    #x = 0.0
    #y = 0.0
    th = 0.0

    #vx = 0.1
    #vy = -0.1
    vth = 0.1

    current_time = rospy.Time.now()
    last_time = rospy.Time.now()
    prefix = "out_turtle_map_"
    ext = ".owl"
    times = "1713"  ## update init owl file
    file_input = prefix + times + ext

    r = rospy.Rate(1.0)
    while not rospy.is_shutdown():
        current_time = rospy.Time.now()
        g = Graph()
        '''
        # to Cloud
        print "file to download " + file_input
        myCmd = 'gsutil -m cp gs://slam-up-bucket/' + file_input + ' ./input'
        os.system(myCmd)
        local_file_input = 'input/' + file_input
        print "file to be parsed " + local_file_input
        '''
        local_file_input = 'out/' + file_input  ## INFO: read the local copy
        if os.path.exists(local_file_input):
            print "path exists " + local_file_input
            g.parse(local_file_input, format="turtle")
            print("graph has %s statements." % len(g))
            px, py, pz = get_position(g)
            ox, oy, oz, ow = get_orientation(g)
            matrix = get_covar_matrix(g)

            covar = np.array([0.0] * 36).reshape(6, 6)
            for cell in matrix:
                row = int(cell[0])
                col = int(cell[1])
                value = float(cell[2])
                covar[row, col] = value

            covar_list = tuple(covar.ravel().tolist())

            # Create nav_msgs/Odometry
            # compute odometry in a typical way given the velocities of the robot
            '''dt = (current_time - last_time).to_sec()
            delta_x = (vx * cos(th) - vy * sin(th)) * dt
            delta_y = (vx * sin(th) + vy * cos(th)) * dt
            delta_th = vth * dt

            x += delta_x
            y += delta_y
            th += delta_th '''

            # since all odometry is 6DOF we'll need a quaternion created from yaw
            odom_quat = tf.transformations.quaternion_from_euler(0, 0, th)

            # first, we'll publish the transform over tf
            odom_broadcaster.sendTransform((px, py, pz), odom_quat,
                                           current_time, "base_link1", "odom1")

            # next, we'll publish the odometry message over ROS
            odom = Odometry()
            odom.header.stamp = current_time
            odom.header.frame_id = "odom1"

            # set the position
            odom.pose.pose = Pose(Point(px, py, pz),
                                  Quaternion(ox, oy, oz, ow))
            odom.pose.covariance = covar_list

            # set the velocity
            vx = vy = 0
            odom.child_frame_id = "base_link1"
            odom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))

            odom_pub.publish(odom)

            ##map
            width = height = resolution = 0
            mapa_info = get_map_info(g)
            if len(mapa_info) > 0:
                flagMap = True
            if flagMap is not True and var_m is not None and var_grid_list is not None:
                print "using saved"
                m = var_m
                grid_list = var_grid_list
            else:
                print "reading map"
                for row in mapa_info:
                    p = row[1]
                    o = p[p.find('#') + 1:len(p)]
                    if o == "Width":
                        width = int(row[2])
                    if o == "Height":
                        height = int(row[2])
                resolution = get_map_resolution(g)

                # Create nav_msgs/OccupancyGrid
                len_grid = width * height
                grid_map = np.array([-1] * len_grid).reshape(width, height)
                objects = get_map_objects(g)
                print("objects detected")
                print(len(objects))
                for row in objects:
                    p = row[0]
                    obj_prefix = "http://github.com/Alex23013/slam-up/individual/obj/"
                    obj_pos = p[len(obj_prefix):len(p)]
                    parts = obj_pos.split('_')
                    grid_map[int(parts[0]), int(parts[1])] = int(row[1])

                grid_list = tuple(grid_map.ravel().tolist())

                map_broadcaster = tf.TransformBroadcaster()
                map_broadcaster.sendTransform(
                    (0, 0, 0), odom_quat, current_time, "base_link1", "map1")

                m = MapMetaData()
                m.resolution = resolution
                m.width = width
                m.height = height
                pos = np.array(
                    [-width * resolution / 2, -height * resolution / 2, 0])
                quat = np.array([0, 0, 0, 1])
                m.origin = Pose()
                m.origin.position.x, m.origin.position.y = pos[:2]

            ogrid = OccupancyGrid()
            ogrid.header.frame_id = 'map1'
            ogrid.header.stamp = rospy.Time.now()

            ogrid.info = m
            ogrid.data = grid_list

            var_m = m
            var_grid_list = grid_list

            map_pub.publish(ogrid)
            rospy.loginfo("publishing map:")
            rospy.loginfo(times)
            last_time = current_time

        else:
            print local_file_input, "file not found"
        rospy.loginfo("publishing odometry:")
        rospy.loginfo(times)
        times = str(1 + int(times))
        file_input = prefix + times + ext
        print "antes sleep"
        r.sleep()
        print "fin_loop"
Example #11
    def __init__(self):

        rospy.init_node('ginger_control_interface')
        self.rospy = rospy
        self.broadcaster = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()

        # the loop rate, supposed to stay under 20 Hz
        self.rate = rospy.Rate(20)

        # self._main_body_joint = Queue.Queue(2)

        # self._left_arm_joint = Queue.Queue(2)
        # self._right_arm_joint = Queue.Queue(2)
        # self._left_arm_vel = Queue.Queue(2)
        # self._right_arm_vel = Queue.Queue(2)
        # self._left_arm_cur = Queue.Queue(2)
        # self._right_arm_cur = Queue.Queue(2)
        #
        # self._left_hand_joint = Queue.Queue(2)
        # self._right_hand_joint = Queue.Queue(2)

        self._main_body_joint = None
        self._left_arm_joint = None
        self._right_arm_joint = None
        self._left_arm_vel = None
        self._right_arm_vel = None
        self._left_arm_cur = None
        self._right_arm_cur = None
        self._left_hand_joint = None
        self._right_hand_joint = None

        ################## Subscriber ############################

        self.rospy.Subscriber("/MainBody/Position", BodyMsgs,
                              self.main_body_joint_CALLBACK)

        self.rospy.Subscriber("/LeftArm/Position", ArmMsgs,
                              self.left_arm_joint_CALLBACK)
        self.rospy.Subscriber("/RightArm/Position", ArmMsgs,
                              self.right_arm_joint_CALLBACK)

        self.rospy.Subscriber("/LeftArm/Velocity", ArmMsgs,
                              self.left_arm_vel_CALLBACK)
        self.rospy.Subscriber("/RightArm/Velocity", ArmMsgs,
                              self.right_arm_vel_CALLBACK)

        self.rospy.Subscriber("/LeftArm/Current", ArmMsgs,
                              self.left_arm_cur_CALLBACK)
        self.rospy.Subscriber("/RightArm/Current", ArmMsgs,
                              self.right_arm_cur_CALLBACK)

        self.rospy.Subscriber("/LeftHand/Position", HandMsgs,
                              self.left_hand_joint_CALLBACK)
        self.rospy.Subscriber("/RightHand/Position", HandMsgs,
                              self.right_hand_joint_CALLBACK)

        #################### Publisher ###########################
        self._main_body_mode_pub = self.rospy.Publisher(
            "/XR1/MainBodyChainModeChange", ChainModeChange, queue_size=5)

        self._left_arm_mode_pub = self.rospy.Publisher(
            "/XR1/LeftArmChainModeChange", ChainModeChange, queue_size=5)
        self._right_arm_mode_pub = self.rospy.Publisher(
            "/XR1/RightArmChainModeChange", ChainModeChange, queue_size=5)

        self._left_hand_mode_pub = self.rospy.Publisher(
            "/XR1/LeftHandChainModeChange", ChainModeChange, queue_size=5)
        self._right_hand_mode_pub = self.rospy.Publisher(
            "/XR1/RightHandChainModeChange", ChainModeChange, queue_size=5)

        ####
        self._main_body_joint_pub = self.rospy.Publisher(
            "/MainBody/TargetPosition", BodyMsgs, queue_size=5)

        self._left_arm_joint_pub = self.rospy.Publisher(
            "/LeftArm/TargetPosition", ArmMsgs, queue_size=5)
        self._right_arm_joint_pub = self.rospy.Publisher(
            "/RightArm/TargetPosition", ArmMsgs, queue_size=5)

        self._left_arm_vel_pub = self.rospy.Publisher(
            "/LeftArm/TargetVelocity", ArmMsgs, queue_size=5)
        self._right_arm_vel_pub = self.rospy.Publisher(
            "/RightArm/TargetVelocity", ArmMsgs, queue_size=5)

        self._left_arm_cur_pub = self.rospy.Publisher("/LeftArm/TargetCurrent",
                                                      ArmMsgs,
                                                      queue_size=5)
        self._right_arm_cur_pub = self.rospy.Publisher(
            "/RightArm/TargetCurrent", ArmMsgs, queue_size=5)

        self._left_hand_joint_pub = self.rospy.Publisher(
            "/LeftHand/TargetPosition", HandMsgs, queue_size=5)
        self._right_hand_joint_pub = self.rospy.Publisher(
            "/RightHand/TargetPosition", HandMsgs, queue_size=5)

        self._left_elbow_pub = self.rospy.Publisher("/LeftArm/ElbowAngle",
                                                    Float64,
                                                    queue_size=5)
        self._right_elbow_pub = self.rospy.Publisher("/RightArm/ElbowAngle",
                                                     Float64,
                                                     queue_size=5)

        self._joint_state_pub = self.rospy.Publisher("/ginger/joint_states",
                                                     JointState,
                                                     queue_size=5)

        ### uploading joint state to MMO server
        self.joint_state = JointState()

        self._wheel_name = ["Wheel_left", "Wheel_right", "Wheel_back"]
        self._main_body_name = [
            "Knee_X", "Back_Z", "Back_X", "Back_Y", "Neck_Z", "Neck_X",
            "Head_Y"
        ]

        self._left_arm_name = [
            "Left_Shoulder_X", "Left_Shoulder_Y", "Left_Elbow_Z",
            "Left_Elbow_X", "Left_Wrist_Z", "Left_Wrist_X", "Left_Wrist_Y"
        ]

        self._right_arm_name = [
            "Right_Shoulder_X", "Right_Shoulder_Y", "Right_Elbow_Z",
            "Right_Elbow_X", "Right_Wrist_Z", "Right_Wrist_X", "Right_Wrist_Y"
        ]

        self._left_hand_name = [
            "Left_1_1", "Left_2_1", "Left_3_1", "Left_4_1", "Left_5_1",
            "Left_1_2", "Left_2_2", "Left_3_2", "Left_4_2", "Left_5_2",
            "Left_1_3", "Left_2_3", "Left_3_3", "Left_4_3", "Left_5_3"
        ]
        self._right_hand_name = [
            "Right_1_1", "Right_2_1", "Right_3_1", "Right_4_1", "Right_5_1",
            "Right_1_2", "Right_2_2", "Right_3_2", "Right_4_2", "Right_5_2",
            "Right_1_3", "Right_2_3", "Right_3_3", "Right_4_3", "Right_5_3"
        ]

        self.joint_state.name = self._wheel_name + self._main_body_name + self._left_arm_name + self._right_arm_name + \
                                self._left_hand_name + self._right_hand_name

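        # 3 wheels + 7 body + (7 + 7) arm + (15 + 15) hand joints = 54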
        self.joint_state.position = [0] * 54
        self.joint_state.velocity = [0] * 54
        self.joint_state.effort = [0] * 54

        self.rospy.loginfo("Ginger Control")
Example #12
  def __init__(self):
    # Read parameters to configure the node
    tf_publish_rate = read_parameter('~tf_publish_rate', 50.)
    tf_period = 1./tf_publish_rate if tf_publish_rate > 0 else float('inf')
    parent_frame = read_parameter('~parent_frame', 'world')
    optitrack_frame = read_parameter('~optitrack_frame', 'optitrack')
    rigid_bodies = read_parameter('~rigid_bodies', dict())
    names = []
    ids = []
    for name,value in rigid_bodies.items():
      names.append(name)
      ids.append(value)
    # Setup Publishers
    pose_pub = rospy.Publisher('/optitrack/rigid_bodies', RigidBodyArray, queue_size=3)
    pose_act_pub = rospy.Publisher('/optitrack/odometry/act_pose', Odometry, queue_size=3)
    # pose_act_pub = rospy.Publisher('/optitrack/odometry/act_pose', Odometry, queue_size=3)


    # Setup TF listener and broadcaster
    tf_listener = tf.TransformListener()
    tf_broadcaster = tf.TransformBroadcaster()
    # Connect to the optitrack system
    iface = read_parameter('~iface', 'eth1')
    version = (2, 7, 0, 0)  # the latest SDK version
    #IPython.embed()
    #optitrack = rx.mkdatasock(ip_address=get_ip_address(iface))
    #Modified by Nikhil
    # optitrack = rx.mkdatasock(ip_address=iface)
    optitrack = rx.mkcmdsock(ip_address=iface)
    msg = struct.pack("I", rx.NAT_PING)
    server_address = '192.168.1.147'
    result = optitrack.sendto(msg, (server_address, rx.PORT_COMMAND))


    rospy.loginfo('Successfully connected to optitrack')
    # Get transformation from the world to optitrack frame
    (parent, child) = (parent_frame, optitrack_frame)
    try:
      now = rospy.Time.now() + rospy.Duration(1.0)
      tf_listener.waitForTransform(parent, child, now, rospy.Duration(5.0))
      (pos,rot) = tf_listener.lookupTransform(parent, child, now)
      wTo = PoseConv.to_homo_mat(pos, rot)  # return 4x4 numpy mat
    except (tf.Exception, tf.LookupException, tf.ConnectivityException):
      rospy.logwarn('Failed to get transformation from %r to %r frame' % (parent, child))
      parent_frame = optitrack_frame
      wTo = np.eye(4)
    # Track up to 24 rigid bodies
    prevtime = np.ones(24)*rospy.get_time()
    # IPython.embed()
    while not rospy.is_shutdown():
      try:
        data = optitrack.recv(rx.MAX_PACKETSIZE)
        # IPython.embed()
        # data, address = optitrack.recvfrom(rx.MAX_PACKETSIZE + 4)
        # IPython.embed()
        # rospy.loginfo("here")
      except socket.error:
        if rospy.is_shutdown():  # exit gracefully
          return
        else:
          rospy.logwarn('Failed to receive packet from optitrack')
          continue  # no data was received, so skip this iteration
      msgtype, packet = rx.unpack(data, version=version)
      if type(packet) is rx.SenderData:
        version = packet.natnet_version
        rospy.loginfo('NatNet version received: ' + str(version))
      if type(packet) in [rx.SenderData, rx.ModelDefs, rx.FrameOfData]:
        # Optitrack gives the position of the centroid. 
        array_msg = RigidBodyArray()
        pose_act_msg = Odometry()
        # IPython.embed()
        if msgtype == rx.NAT_FRAMEOFDATA:
          # loop through all the rigid bodies
          # use only one for right now
          for i, rigid_body in enumerate(packet.rigid_bodies):
            body_id = rigid_body.id

            # IPython.embed()

            pos_opt = np.array(rigid_body.position)
            rot_opt = np.array(rigid_body.orientation)
            # IPython.embed()
            oTr = PoseConv.to_homo_mat(pos_opt, rot_opt)
            # Transformation between world frame and the rigid body            
            WTr = np.dot(wTo, oTr)
            wTW = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
            wTr = np.dot(wTW,WTr)

            ## New Change ##
            # Toffset = np.array( [[0, 1, 0, 0],[0, 0, 1, 0],[1, 0, 0, 0],[0, 0, 0, 1]] )
            # tf_wTr = np.dot(oTr,Toffset)
            # tf_pose = Pose()
            # tf_pose.position = Point(*tf_wTr[:3,3])
            # tf_pose.orientation = Quaternion(*tr.quaternion_from_matrix(tf_wTr))

            # IPython.embed()

            array_msg.header.stamp = rospy.Time.now()
            array_msg.header.frame_id = parent_frame
            body_msg = RigidBody()
            pose = Pose()
            pose.position = Point(*wTr[:3,3])
            # OTr = np.dot(oTr,Toffset)
            # pose.orientation = Quaternion(*tr.quaternion_from_matrix(oTr))

            # change 26 Feb. 2017
            # get the euler angle we want then compute the new quaternion
            euler = tr.euler_from_quaternion(rot_opt)
            quaternion = tr.quaternion_from_euler(euler[2],euler[0],euler[1])
            pose.orientation.x = quaternion[0]
            pose.orientation.y = quaternion[1]
            pose.orientation.z = quaternion[2]
            pose.orientation.w = quaternion[3]

            # change made 11 july, 2017
            # directly get position and orientation information
            roll = euler[2]*180/np.pi
            pitch = euler[0]*180/np.pi
            yaw = euler[1]*180/np.pi

            # set up pose_act_msg publisher
            pose_act_msg.header.stamp = array_msg.header.stamp
            pose_act_msg.header.frame_id = parent_frame
            # add velocity estimation later
            # pose_act_msg.vel
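            # NOTE: position/yaw/euler/tracking_valid are not fields of
            # nav_msgs/Odometry; the assignments below presumably target a
            # patched or custom message type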
            pose_act_msg.position = pose.position
            pose_act_msg.yaw = yaw
            pose_act_msg.euler.x = roll
            pose_act_msg.euler.y = pitch
            pose_act_msg.euler.z = yaw
            pose_act_msg.tracking_valid = rigid_body.tracking_valid

            body_msg.id = body_id
            body_msg.tracking_valid = rigid_body.tracking_valid
            body_msg.mrk_mean_error = rigid_body.mrk_mean_error
            body_msg.pose = pose
            for marker in rigid_body.markers:
              # TODO: Should transform the markers as well
              body_msg.markers.append(Point(*marker))
            array_msg.bodies.append(body_msg)
            # Control the publish rate for the TF broadcaster
            if rigid_body.tracking_valid and (rospy.get_time()-prevtime[body_id] >= tf_period):
              body_name = 'rigid_body_%d' % (body_id)
              # if body_id in ids:
              #   idx = ids.index(body_id)
              #   body_name = names[idx]
              ## no change ##
              # tf_broadcaster.sendTransform(pos_opt, rot_opt, rospy.Time.now(), body_name, optitrack_frame)
              # change 1 ## 
              # pos2 = np.array([tf_pose.position.x, tf_pose.position.y, tf_pose.position.z])
              # rot2 = np.array([tf_pose.orientation.x, tf_pose.orientation.y, tf_pose.orientation.z, tf_pose.orientation.w])
              # tf_broadcaster.sendTransform(pos2, rot2, rospy.Time.now(), body_name, optitrack_frame)
              ## change 2 ## <fail>
              # pos2 = np.array([-pose.position.y, pose.position.x, pose.position.z])
              # rot2 = np.array([-pose.orientation.y, pose.orientation.x, pose.orientation.z, pose.orientation.w])
              # tf_broadcaster.sendTransform(pos2, rot2, rospy.Time.now(), body_name, optitrack_frame)
              ## change 3 ## <fail>
              # pos2 = np.array([-pos_opt[1],pos_opt[0],pos_opt[2]])
              # rot2 = np.array([-rot_opt[1],rot_opt[0],rot_opt[2],rot_opt[3]])
              # tf_broadcaster.sendTransform(pos2, rot2, rospy.Time.now(), body_name, optitrack_frame)
              ## change 4 ## <>
              pos2 = np.array([pose.position.x, pose.position.y, pose.position.z])
              rot2 = np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z,pose.orientation.w])
              tf_broadcaster.sendTransform(pos2, rot2, rospy.Time.now(), body_name, parent_frame)

              prevtime[body_id] = rospy.get_time()

        pose_pub.publish(array_msg)
        pose_act_pub.publish(pose_act_msg)
Example #13
	def publish_tf(self):
		br = tf.TransformBroadcaster()
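		# sendTransform args: translation, rotation quaternion, stamp,
		# child frame, then parent frame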
		br.sendTransform((self.model_pose1[0], self.model_pose1[1], self.model_pose1[2]), (0.0, 0.0, 0.0, 1.0), rospy.Time.now(), "object", "odom")
Example #14
    def __init__(self):
        # parameters
        self.ANGLE_STEP = int(rospy.get_param("~angle_step"))
        self.MAX_PARTICLES = int(rospy.get_param("~max_particles"))
        self.MAX_VIZ_PARTICLES = int(rospy.get_param("~max_viz_particles"))
        self.INV_SQUASH_FACTOR = 1.0 / float(rospy.get_param("~squash_factor"))
        self.MAX_RANGE_METERS = float(rospy.get_param("~max_range"))
        self.THETA_DISCRETIZATION = int(
            rospy.get_param("~theta_discretization"))
        self.WHICH_RM = rospy.get_param("~range_method", "cddt").lower()
        self.RANGELIB_VAR = int(rospy.get_param("~rangelib_variant", "3"))
        self.SHOW_FINE_TIMING = bool(rospy.get_param("~fine_timing", "0"))
        self.PUBLISH_ODOM = bool(rospy.get_param("~publish_odom", "1"))
        self.PUBLISH_TF = bool(rospy.get_param("~publish_tf", "1"))
        self.DO_VIZ = bool(rospy.get_param("~viz"))

        # sensor model constants
        self.Z_SHORT = float(rospy.get_param("~z_short", 0.01))
        self.Z_MAX = float(rospy.get_param("~z_max", 0.07))
        self.Z_RAND = float(rospy.get_param("~z_rand", 0.12))
        self.Z_HIT = float(rospy.get_param("~z_hit", 0.75))
        self.SIGMA_HIT = float(rospy.get_param("~sigma_hit", 8.0))

        # motion model constants
        self.MOTION_DISPERSION_X = float(
            rospy.get_param("~motion_dispersion_x", 0.05))
        self.MOTION_DISPERSION_Y = float(
            rospy.get_param("~motion_dispersion_y", 0.025))
        self.MOTION_DISPERSION_THETA = float(
            rospy.get_param("~motion_dispersion_theta", 0.25))

        # various data containers used in the MCL algorithm
        self.MAX_RANGE_PX = None
        self.odometry_data = np.array([0.0, 0.0, 0.0])
        self.laser = None
        self.iters = 0
        self.map_info = None
        self.map_initialized = False
        self.lidar_initialized = False
        self.odom_initialized = False
        self.last_pose = None
        self.laser_angles = None
        self.downsampled_angles = None
        self.range_method = None
        self.last_time = None
        self.last_stamp = None
        self.first_sensor_update = True
        self.state_lock = Lock()

        # cache this to avoid memory allocation in motion model
        self.local_deltas = np.zeros((self.MAX_PARTICLES, 3))

        # cache this for the sensor model computation
        self.queries = None
        self.ranges = None
        self.tiled_angles = None
        self.sensor_model_table = None

        # particle poses and weights
        self.inferred_pose = None
        self.particle_indices = np.arange(self.MAX_PARTICLES)
        self.particles = np.zeros((self.MAX_PARTICLES, 3))
        self.weights = np.ones(self.MAX_PARTICLES) / float(self.MAX_PARTICLES)

        # initialize the state
        self.smoothing = Utils.CircularArray(10)
        self.timer = Utils.Timer(10)
        self.get_omap()
        self.precompute_sensor_model()
        self.initialize_global()

        # these topics are for visualization
        self.pose_pub = rospy.Publisher("/pf/viz/inferred_pose",
                                        PoseStamped,
                                        queue_size=1)
        self.particle_pub = rospy.Publisher("/pf/viz/particles",
                                            PoseArray,
                                            queue_size=1)
        self.pub_fake_scan = rospy.Publisher("/pf/viz/fake_scan",
                                             LaserScan,
                                             queue_size=1)
        self.rect_pub = rospy.Publisher("/pf/viz/poly1",
                                        PolygonStamped,
                                        queue_size=1)

        if self.PUBLISH_ODOM:
            self.odom_pub = rospy.Publisher("/pf/pose/odom",
                                            Odometry,
                                            queue_size=1)

        # these topics are for coordinate space things
        self.pub_tf = tf.TransformBroadcaster()

        # these topics are to receive data from the racecar
        self.laser_sub = rospy.Subscriber(rospy.get_param(
            "~scan_topic", "/scan"),
                                          LaserScan,
                                          self.lidarCB,
                                          queue_size=1)
        self.odom_sub = rospy.Subscriber(rospy.get_param(
            "~odometry_topic", "/odom"),
                                         Odometry,
                                         self.odomCB,
                                         queue_size=1)
        self.pose_sub = rospy.Subscriber("/initialpose",
                                         PoseWithCovarianceStamped,
                                         self.clicked_pose,
                                         queue_size=1)
        self.click_sub = rospy.Subscriber("/clicked_point",
                                          PointStamped,
                                          self.clicked_pose,
                                          queue_size=1)

        print "Finished initializing, waiting on messages..."
Example #15
def usbl_move(pos):

    broadcaster = tf.TransformBroadcaster()
    if usbl_move.start:
        dt = 2

        loc = np.matrix([
            [0],  # v_x
            [0],  # v_y
            [pos.pose.position.x],  # x
            [pos.pose.position.y]  # y
        ])

        # constant-velocity model: position += dt * velocity
        A = np.matrix([[1, 0, 0, 0],
                       [0, 1, 0, 0],
                       [dt, 0, 1, 0],
                       [0, dt, 0, 1]])
        B = np.matrix([0])
        C = np.eye(loc.shape[0])
        Q = np.eye(loc.shape[0]) * 0.5
        R = np.eye(loc.shape[0]) * 5000
        P = np.eye(loc.shape[0])
        U = np.matrix([[0]])
        Z = np.matrix([[0], [0], [0], [0]])

        usbl_move.kalman = kalman_filter.kalman_filter(A, B, C, Q, P, R, loc)
        usbl_move.md_filter = md_filter.md_filter(2, [1.75, 1.6], 10, [0, 1])
        usbl_move.start = 0

    if usbl_move.md_filter.update([pos.pose.position.x, pos.pose.position.y]):
        #update.ax.scatter(pos.position.x,pos.position.y,-1*current.position.z,color='b')
        current.position.x = pos.pose.position.x
        current.position.y = pos.pose.position.y
        #current.position.z = pos.pose.position.z
        #update('b')
        Z = np.matrix([[0], [0], [pos.pose.position.x], [pos.pose.position.y]])
        U = np.matrix([[0]])
        usbl_move.kalman.move(U, Z)
        kalman_pos = usbl_move.kalman.getState()
        # state layout is [v_x, v_y, x, y]
        current.position.x = kalman_pos[2]
        current.position.y = kalman_pos[3]

    broadcaster.sendTransform(
        (current.position.x, current.position.y, current.position.z),
        (current.orientation.x, current.orientation.y, current.orientation.z,
         current.orientation.w), rospy.Time.now(), "body", "odom")
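
usbl_move keeps its one-shot initialization state in function attributes, so usbl_move.start has to be primed before the first message arrives. A minimal wiring sketch; the node and topic names are assumptions:

import rospy
from geometry_msgs.msg import PoseStamped

usbl_move.start = 1  # prime the one-shot init flag before the first callback
rospy.init_node('usbl_kalman')
rospy.Subscriber('/usbl_pose', PoseStamped, usbl_move)
rospy.spin()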
Example #16
    def talker(self):
        # initialize the node before wiring up topics
        rospy.init_node('sonar_publisher', anonymous=True)
        rospy.Subscriber(self.imu_topic, Imu, self.imu_callback)
        odom_broadcaster = tf.TransformBroadcaster()
        odom_pub = rospy.Publisher(self.odom_topic, Odometry, queue_size=10)
        depth_pub = rospy.Publisher(self.depth_topic, Range, queue_size=10)
        temperature_pub = rospy.Publisher(self.temperature_topic,
                                          Temperature,
                                          queue_size=10)
        odom_msg = Odometry()
        odom_quart = Quaternion()
        depth_msg = Range()
        temperature_msg = Temperature()

        odom_msg.pose.covariance = [0] * 36
        odom_msg.twist.covariance = [0] * 36

        r = rospy.Rate(20)

        self.x_pos = 0.0
        self.y_pos = 0.0
        self.theta = 0.0

        while not rospy.is_shutdown():
            # get linear velocity
            data = self.DST.next()
            # print data
            if data is not None:
                # print data
                for key, value in data.iteritems():
                    if key == "speed":
                        self.v_x = value
                        rospy.loginfo("speed is %f", self.v_x)
                    elif key == "depth":
                        self.depth = value
                        rospy.loginfo("depth is %f", self.depth)
                    elif key == "temperature":
                        self.temperature = value
                        rospy.loginfo("temperature is %f", self.temperature)
                    else:
                        rospy.loginfo("data received is invalid")
            else:
                rospy.loginfo("no data is reveived")

            # get current time
            self.current_time = rospy.Time.now()
            # change in time
            self.DTT = (self.current_time - self.last_time).to_sec()
            rospy.loginfo("time is %f", self.DTT)

            # angular displacement
            self.delta_theta = self.imu_z * self.DTT
            # linear displacement in x
            self.delta_x = (self.v_x * math.cos(self.theta)) * self.DTT
            # linear displacement in y
            self.delta_y = (self.v_x * math.sin(self.theta)) * self.DTT

            # current position
            self.x_pos += self.delta_x
            self.y_pos += self.delta_y
            self.theta += self.delta_theta

            # calculate heading in quaternion
            # rotate z upside down
            odom_quart = tf.transformations.\
                quaternion_from_euler(math.pi, 0, self.theta)
            # rospy.loginfo(odom_quart)
            odom_broadcaster.sendTransform((self.x_pos, self.y_pos, 0),
                                           odom_quart, rospy.Time.now(),
                                           self.fixed_frame, self.odom_frame)

            odom_msg.header.stamp = self.current_time
            odom_msg.header.frame_id = self.odom_frame
            odom_msg.pose.pose.position.x = self.x_pos
            odom_msg.pose.pose.position.y = self.y_pos
            odom_msg.pose.pose.position.z = 0.0
            odom_msg.pose.pose.orientation.x = odom_quart[0]
            odom_msg.pose.pose.orientation.y = odom_quart[1]
            odom_msg.pose.pose.orientation.z = odom_quart[2]
            odom_msg.pose.pose.orientation.w = odom_quart[3]
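            # indices 0, 7, 14, 21, 28 and 35 are the diagonal of the
            # row-major 6x6 covariance (x, y, z, roll, pitch, yaw)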
            odom_msg.pose.covariance[0] = 0.00001
            odom_msg.pose.covariance[7] = 0.00001
            odom_msg.pose.covariance[14] = 1000000000000.0
            odom_msg.pose.covariance[21] = 1000000000000.0
            odom_msg.pose.covariance[28] = 1000000000000.0
            odom_msg.pose.covariance[35] = 0.001

            odom_msg.child_frame_id = self.fixed_frame
            odom_msg.twist.twist.linear.x = self.v_x
            odom_msg.twist.twist.linear.y = 0.0
            odom_msg.twist.twist.linear.z = 0.0
            odom_msg.twist.twist.angular.x = 0.0
            odom_msg.twist.twist.angular.y = 0.0
            odom_msg.twist.twist.angular.z = self.imu_z
            odom_msg.twist.covariance[0] = 0.00001
            odom_msg.twist.covariance[7] = 0.00001
            odom_msg.twist.covariance[14] = 1000000000000.0
            odom_msg.twist.covariance[21] = 1000000000000.0
            odom_msg.twist.covariance[28] = 1000000000000.0
            odom_msg.twist.covariance[35] = 0.001

            depth_msg.radiation_type = Range.ULTRASOUND
            depth_msg.header.stamp = self.current_time
            depth_msg.header.frame_id = self.odom_frame
            depth_msg.field_of_view = 3 * 2 / math.pi
            depth_msg.min_range = 0.2
            depth_msg.max_range = 80
            depth_msg.range = self.depth

            temperature_msg.header.stamp = self.current_time
            temperature_msg.header.frame_id = self.odom_frame
            temperature_msg.temperature = self.temperature
            temperature_msg.variance = 0.0

            odom_pub.publish(odom_msg)
            depth_pub.publish(depth_msg)
            temperature_pub.publish(temperature_msg)

            self.last_time = self.current_time
            r.sleep()
Example #17
def pose_move(pos):

    # pos.position.z is in kPa and has to be converted to depth:
    # P = P0 + rho*g*z  ---->  pos.position.z = P0 + rho*g*z_real
    # z_real = -(1000*pos.position.z - 101.325)/9.81
    z_real = -pos.position.z
    toDegree = 180 / math.pi
    current.position.z = z_real
    broadcaster = tf.TransformBroadcaster()
    #set up the Kalman Filter
    #tf.transformations.quaternion_from_euler()
    (roll, pitch, yaw) = tf.transformations.euler_from_quaternion([
        -1 * pos.orientation.y, pos.orientation.x, -1 * pos.orientation.z,
        pos.orientation.w
    ])

    roll = roll * toDegree
    pitch = pitch * toDegree
    yaw = yaw * toDegree
    s = 'The value of roll is ' + repr(roll) + ', and pitch is ' + repr(
        pitch) + ', yaw = ' + repr(yaw)
    print s

    if pose_move.start:
        dt = .02
        pos = np.matrix([
            [0],  # roll rate
            [0],  # pitch rate
            [0],  # yaw rate
            [roll],
            [pitch],
            [yaw]
        ])

        # constant-velocity model on the Euler angles:
        # angle += dt * angular rate
        A = np.matrix([[1, 0, 0, 0, 0, 0],
                       [0, 1, 0, 0, 0, 0],
                       [0, 0, 1, 0, 0, 0],
                       [dt, 0, 0, 1, 0, 0],
                       [0, dt, 0, 0, 1, 0],
                       [0, 0, dt, 0, 0, 1]])
        B = np.matrix([0])
        C = np.eye(pos.shape[0])
        Q = np.eye(pos.shape[0]) * .5
        R = np.eye(pos.shape[0]) * 500
        P = np.eye(pos.shape[0])
        U = np.matrix([[0]])
        Z = np.matrix([[0], [0], [0], [0], [0], [0]])

        pose_move.kalman = kalman_filter.kalman_filter(A, B, C, Q, P, R, pos)

        pose_move.start = 0

    Z = np.matrix([[0], [0], [0], [roll], [pitch], [yaw]])
    U = np.matrix([[0]])
    pose_move.kalman.move(U, Z)
    pos = pose_move.kalman.getState()

    roll = pos[3]
    pitch = pos[4]
    yaw = pos[5]

    # quad = tf.transformations.quaternion_from_euler(roll/toDegree, pitch/toDegree, yaw/toDegree )
    quad = tf.transformations.quaternion_from_euler(0, 0, yaw / toDegree)

    current.orientation.x = quad[0]
    current.orientation.y = quad[1]
    current.orientation.z = quad[2]
    current.orientation.w = quad[3]

    broadcaster.sendTransform(
        (current.position.x, current.position.y, current.position.z),
        (current.orientation.x, current.orientation.y, current.orientation.z,
         current.orientation.w), rospy.Time.now(), "body", "odom")
Example #18
    def __init__(self, rate=10):
        # self.pub_Image = rospy.Publisher('image_raw_sync', SesnorImage, queue_size=1)
        # self.pub_Cam_Info = rospy.Publisher('camera_info_sync', CameraInfo, queue_size=1)
        # self.pub_Lidar = rospy.Publisher('rslidar_points_sync', PointCloud2, queue_size=1)
        self.imuInput = rospy.Subscriber("/self/usv/imu",
                                         Imu,
                                         self.imuCallback,
                                         queue_size=2)
        self.gpsInput = rospy.Subscriber('/self/usv/gps',
                                         NavSatFix,
                                         self.gpsCallback,
                                         queue_size=2)
        self.gpsVeloInput = rospy.Subscriber('/self/usv/gps/fix_velocity',
                                             Vector3Stamped,
                                             self.gpsVeloCallback,
                                             queue_size=2)

        self.targetGpsInput = rospy.Subscriber('/target/usv_2/gps',
                                               NavSatFix,
                                               self.targetGpsCallback,
                                               queue_size=2)
        self.targetGpsVeloInput = rospy.Subscriber(
            '/target/usv_2/gps/fix_velocity',
            Vector3Stamped,
            self.targetGpsVeloCallback,
            queue_size=2)

        self.sensor_pub = rospy.Publisher('/base/sensor',
                                          BaseSensor,
                                          queue_size=1)
        # self.ts = message_filters.TimeSynchronizer([self.imuInput
        #                                             , self.gpsInput
        #                                             ], 10)
        # self.ts.registerCallback(self.general_callback)
        self.br_boat_world = tf.TransformBroadcaster()
        self.br_lidar_boat = tf.TransformBroadcaster()
        self.br_lidar_boat2 = tf.TransformBroadcaster()
        self.br_lidar_boat3 = tf.TransformBroadcaster()
        self.br_camera_boat = tf.TransformBroadcaster()
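        # (a single TransformBroadcaster could publish all of these frames;
        # one instance per frame is not required)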

        self.gps_velo_msg = Vector3Stamped()
        self.target_gps_msg = NavSatFix()
        self.target_gps_velo_msg = Vector3Stamped()

        self.imuflag = False
        self.gpsflag = False

        self.lat = d2r(30.06022459407145675)
        self.lon = d2r(120.173913575780311191)

        rate = rospy.Rate(rate)

        while not rospy.is_shutdown():
            if self.imuflag and self.gpsflag:
                print('process')
                self.general_callback(self.imu_msg, self.gps_msg,
                                      self.gps_velo_msg, self.target_gps_msg,
                                      self.target_gps_velo_msg)
                self.imuflag = False
                self.gpsflag = False

            rate.sleep()
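The commented-out message_filters.TimeSynchronizer above only fires when stamps match exactly, which raw IMU/GPS topics rarely do; the flag-polling loop is a workaround. A sketch of the usual alternative, ApproximateTimeSynchronizer, assuming the same topic names:

import rospy
import message_filters
from sensor_msgs.msg import Imu, NavSatFix

def synced_cb(imu_msg, gps_msg):
    rospy.loginfo('got an imu/gps pair within 50 ms of each other')

rospy.init_node('sync_demo')  # hypothetical node name
imu_sub = message_filters.Subscriber('/self/usv/imu', Imu)
gps_sub = message_filters.Subscriber('/self/usv/gps', NavSatFix)
# queue_size=10, slop=0.05 s: pairs messages whose stamps differ by < slop
ts = message_filters.ApproximateTimeSynchronizer([imu_sub, gps_sub], 10, 0.05)
ts.registerCallback(synced_cb)
rospy.spin()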
Example #19
def main():
    rospy.init_node('leica_tf_broadcaster', anonymous=True)
    while (not rospy.is_shutdown()):
        PrismLowerLeftGate_broadcaster1 = tf.TransformBroadcaster()
        PrismTopGate_broadcaster2 = tf.TransformBroadcaster()
        PrismLowerRightGate_broadcaster3 = tf.TransformBroadcaster()

        RobotLeftPrism_broadcaster1 = tf.TransformBroadcaster()
        RobotTopPrism_broadcaster2 = tf.TransformBroadcaster()
        RobotRightPrism_broadcaster3 = tf.TransformBroadcaster()

        ############## Static Transform from Gate Origin to respective prism ##############
        #create a quaternion
        rotation_quaternion = tf.transformations.quaternion_from_euler(
            0.0, 0.0, 0.0)

        #translation vector

        #According to Sheet for older terrain
        # Prism1_translation_vector1 = (0.0,1.592 , 1.11)
        # Prism2_translation_vector2 = (0.0, 0.0, 1.82)
        # Prism3_translation_vector3 = (0.0, -1.671, 0.79)

        Prism1_translation_vector1 = (0.0, 1.523, 1.07)
        Prism2_translation_vector2 = (0.0, 0.0, 1.83)
        Prism3_translation_vector3 = (0.0, -1.394, 0.755)

        ############## Static Transform from Robot_Origin to respective prism ##############
        #create a quaternion
        RobotLeftPrism_rotation_quaternion = tf.transformations.quaternion_from_euler(
            0.0, 0.0, 0.0)
        RobotTopPrism_rotation_quaternion = tf.transformations.quaternion_from_euler(
            0.0, 0.0, 0.0)
        RobotRightPrism_rotation_quaternion = tf.transformations.quaternion_from_euler(
            0.0, 0.0, 0.0)

        #translation vector
        # RobotLeftPrism_translation_vector1 = (-0.2513, 0.2, 0.2833)
        # RobotTopPrism_translation_vector2 = (0.0013, -0.2, 0.2833)
        # RobotRightPrism_translation_vector3 = (-0.2513, -0.2, 0.2833)
        RobotLeftPrism_translation_vector1 = (-0.045, 0.100025, 0.0174602)
        RobotTopPrism_translation_vector2 = (0.045, -0.100025, 0.0174602)
        RobotRightPrism_translation_vector3 = (-0.045, -0.100025, 0.0174602)

        # #For Updated Prism Rig
        # RobotLeftPrism_translation_vector1 = (0.3929, 0.100025, 0.5247)
        # RobotTopPrism_translation_vector2 = (0.4829, -0.100025, 0.5247)
        # RobotRightPrism_translation_vector3 = (0.3929, -0.100025, 0.5247)

        #time
        current_time = rospy.Time.now()

        PrismLowerLeftGate_broadcaster1.sendTransform(
            Prism1_translation_vector1, rotation_quaternion, current_time,
            "PrismLeftLowerGate", "Gate_Origin")  #child frame, parent frame
        PrismTopGate_broadcaster2.sendTransform(
            Prism2_translation_vector2, rotation_quaternion, current_time,
            "PrismTopGate", "Gate_Origin")  #child frame, parent frame
        PrismLowerRightGate_broadcaster3.sendTransform(
            Prism3_translation_vector3, rotation_quaternion, current_time,
            "PrismRightLowerGate", "Gate_Origin")  #child frame, parent frame

        RobotLeftPrism_broadcaster1.sendTransform(
            RobotLeftPrism_translation_vector1,
            RobotLeftPrism_rotation_quaternion, current_time, "RobotLeftPrism",
            "Robot_Origin")  #child frame, parent frame
        RobotTopPrism_broadcaster2.sendTransform(
            RobotTopPrism_translation_vector2,
            RobotTopPrism_rotation_quaternion, current_time, "RobotTopPrism",
            "Robot_Origin")  #child frame, parent frame
        RobotRightPrism_broadcaster3.sendTransform(
            RobotRightPrism_translation_vector3,
            RobotRightPrism_rotation_quaternion, current_time,
            "RobotRightPrism", "Robot_Origin")  #child frame, parent frame

        time.sleep(0.5)

    rospy.spin()
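The prism offsets above never change, so re-broadcasting them twice a second (and re-creating the broadcasters on every pass) is unnecessary; tf2 offers a latched static broadcaster that publishes once. A sketch for one prism, reusing a value from the example:

import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped

rospy.init_node('gate_static_tf')  # hypothetical node name
static_br = tf2_ros.StaticTransformBroadcaster()

t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = 'Gate_Origin'   # parent frame
t.child_frame_id = 'PrismTopGate'   # child frame
t.transform.translation.z = 1.83    # Prism2 offset from the example
t.transform.rotation.w = 1.0        # identity rotation
static_br.sendTransform(t)          # latched: sent once, served forever
rospy.spin()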
Example #20
  def __init__(self):
    # Node initiation with log level debug by default
    rospy.init_node('simple_aruco_localisation', log_level=rospy.DEBUG)
    # IIND format banner
    rospy.loginfo("--------------------------------------------------")
    rospy.loginfo(" Integrated Innovation for Nuclear Decommissioning")
    rospy.loginfo("           Simple Aruco Localisation              ")
    rospy.loginfo(" partner: Wood                                    ")
    rospy.loginfo("--------------------------------------------------")

    #> Attributes


    #> Parameters
    frequency = rospy.get_param('~frequency', 50)
    rospy.loginfo("Frequency: %f", frequency)
    rate = rospy.Rate(frequency)
    self.parent_id = rospy.get_param('~parent_id', 'world')
    self.child_id = rospy.get_param('~child_id', 'marker')
    self.camera_optical_frame_id = rospy.get_param('~camera_optical_frame_id', 'i3dr_stereo_cameraLeft_optical')
    self.marker_frame_id = rospy.get_param('~marker_frame_id', 'frame_marker_1')
    camera_frames = [self.camera_optical_frame_id]
    marker_frames = [self.marker_frame_id]
    
    tfl = tf.TransformListener()
    tft = tf.TransformerROS()
    tfb = tf.TransformBroadcaster()

    ## Loop
    while not rospy.is_shutdown():

      try:
        temps = rospy.Time(0)
        ts, oks = [], [False, False]
        for i in range(len(camera_frames)):
          if tfl.canTransform( camera_frames[i], marker_frames[i], temps): # Should check with fiducial transforms
            (t, q) = tfl.lookupTransform(camera_frames[i], marker_frames[i], temps)
            ts.append(t)
            oks[i] = True

        if not oks[0] and not oks[1]:
          rospy.logerr_throttle(1,"Transformations not available")
          continue
        else:
          if not oks[1]:
            marker_frame = marker_frames[0]
          elif not oks[0]:
            marker_frame = marker_frames[1]
          else:
            # Check which marker is closer and choose that location
            if np.linalg.norm(ts[0]) <= np.linalg.norm(ts[1]):
              marker_frame = marker_frames[0]
            else:
              marker_frame = marker_frames[1]

          rospy.loginfo_throttle(1,marker_frame.split('_')[0])
          (t_w_m, q_w_m) = tfl.lookupTransform("world", marker_frame, temps)
          tfb.sendTransform(t_w_m, q_w_m, rospy.Time.now(), self.child_id, self.parent_id)  # q_w_m is already a quaternion; stamp with now(), not Time(0)
      
      except tf.Exception as e:
        rospy.logwarn("error while waiting for frames: {0}".format(e))
      
      rate.sleep()
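The loop above queries with rospy.Time(0), which in tf means "the latest transform available", not time zero. Outside a polling loop, the blocking equivalent looks like this (frame names hypothetical):

import rospy
import tf

rospy.init_node('lookup_demo')  # hypothetical node name
tfl = tf.TransformListener()
try:
    # block until the transform arrives or 1 s elapses, then read the latest
    tfl.waitForTransform('world', 'marker', rospy.Time(0), rospy.Duration(1.0))
    (t, q) = tfl.lookupTransform('world', 'marker', rospy.Time(0))
except tf.Exception as e:
    rospy.logwarn('transform unavailable: %s', e)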
Example #21
	def follow(self):
		if not (self.target_found and self.object_location is not None):
			return
		pid = self.camera_pid
		pid.kp = -0.0005
		pid.kd = 0.005
		target_value = 0.0
		current_value = self.object_location
	
		print current_value

		ang = pid.compute(current_value, target_value)

		keep_distance = 0.3
		spectrum = 20	
		mid = np.amin(np.concatenate((self.laser_values[-spectrum:], self.laser_values[:spectrum]), axis=0) )
		#right = np.amin(self.laser_values[-2*spectrum:-spectrum])
		#left = np.amin(self.laser_values[spectrum:spectrum*2])

		#print 'left', left, 'mid', mid, 'right', right, ' ang', ang

		if self.target_found:
			if abs(current_value)<2:
				distance = min(self.laser_values[0],3)
				if distance < 0.1:
					return
				client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
				client.wait_for_server()
				goal = MoveBaseGoal()
				goal.target_pose.header.frame_id = "base_link"
				goal.target_pose.header.stamp = rospy.Time.now()
				# Move 0.5 meters forward along the x axis of the "map" coordinate frame 
				goal.target_pose.pose.position.x = distance - 0.4
				# No rotation of the mobile base frame w.r.t. map frame
				goal.target_pose.pose.orientation.w = 1.0
				print 'going on goal', goal
				time.sleep(3.0)
				client.send_goal(goal)  # send_goal returns None; nothing useful to assign

				time.sleep(2.0)
				client.cancel_goal()

				# Waits for the server to finish performing the action.
				wait = client.wait_for_result()


				if not wait:
					rospy.logerr("Action server not available!")
					rospy.signal_shutdown("Action server not available!")
				else:
				# Result of executing the action
					print 'result', client.get_result()



				br = tf.TransformBroadcaster()
				
				br.sendTransform((distance, 0.0, 0.0),(0.0, 0.0, 0.0, 1.0),rospy.Time.now(),"/carrot1","/base_link")
				ls = tf.TransformListener()
				#print ls.allFramesAsString() 
				#print 'carrot', ls.frameExists("/carrot1"), 'map', ls.frameExists("map"), 'base_link', ls.frameExists("base_link")
				if ls.frameExists("carrot1") and ls.frameExists("map"):			
					t = ls.getLatestCommonTime("carrot1", "map")		
					position, quaternion = ls.lookupTransform("carrot1", "map", t)
					print 'pos', position
				return
				# NOTE: the return above makes the rest of this block unreachable;
				# it is left over from an earlier velocity-control version.
				if mid<1.5:
					#drive(mid - keep_distance ,ang)
					vel = mid - keep_distance
				else:
					#drive(1,ang)
					vel = 1
		else:
			#drive(0,0)
			vel = 0
			ang = 0



		vel=0
		self.drive(vel,-ang)
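The goal handling above sleeps for fixed intervals and then cancels unconditionally; the usual actionlib pattern blocks on wait_for_result with a timeout and cancels only if it expires. A sketch with the same move_base setup:

import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal

rospy.init_node('goal_demo')  # hypothetical node name
client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
client.wait_for_server()

goal = MoveBaseGoal()
goal.target_pose.header.frame_id = 'base_link'
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = 0.5   # 0.5 m forward
goal.target_pose.pose.orientation.w = 1.0

client.send_goal(goal)
if client.wait_for_result(rospy.Duration(30.0)):   # True when the goal finishes
    rospy.loginfo('result: %s', client.get_result())
else:
    client.cancel_goal()                           # timed out: give up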
Example #22
    def __init__(self):
        rospy.init_node('arduino_relay')

        self._imu_data = {}

        self._lock = threading.RLock()

        # self.imu_calibration_loaded = False
        # self.imu_calibration_loaded_time = rospy.Time(0)

        self.diagnostics_msg_count = 0

        self.diagnostics_buffer = []

        # http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20broadcaster%20%28Python%29
        self.tf_br = tf.TransformBroadcaster()
        #self.tf2_br = tf2_ros.TransformBroadcaster()

        #rospy.Service('~reset_odometry', Empty, self.reset_odometry)

        ## Publishers.

        self.diagnostics_pub = rospy.Publisher('/diagnostics',
                                               DiagnosticArray,
                                               queue_size=10)

        self.odometry_pub = rospy.Publisher('/odom', Odometry, queue_size=10)

        self.imu_calibration_load_pub = rospy.Publisher(
            '/torso_arduino/imu_calibration_load',
            UInt16MultiArray,
            queue_size=1)

        self.imu_pub = rospy.Publisher('/imu_data', Imu, queue_size=10)

        self.joint_pub = rospy.Publisher('joint_states',
                                         JointState,
                                         queue_size=10)

        self._joint_pub_lock = threading.RLock()

        self.last_pan_angle = None
        self.last_tilt_angle = None
        self.received_angles = False

        self._joint_pub_thread = threading.Thread(
            target=self.publish_joints_thread)
        self._joint_pub_thread.daemon = True
        self._joint_pub_thread.start()

        ## Head Subscribers.

        rospy.Subscriber('/head_arduino/diagnostics_relay', String,
                         partial(self.on_diagnostics_relay, prefix='head'))

        rospy.Subscriber('/head_arduino/pan_degrees', Int16,
                         self.on_head_pan_degrees)

        rospy.Subscriber('/head_arduino/tilt_degrees', Int16,
                         self.on_head_tilt_degrees)

        ## Torso Subscribers.

        rospy.Subscriber('/torso_arduino/diagnostics_relay', String,
                         partial(self.on_diagnostics_relay, prefix='torso'))

        # rospy.Subscriber('/torso_arduino/imu_calibration_save', UInt16MultiArray, self.on_imu_calibration_save)

        rospy.Subscriber('/torso_arduino/imu_relay', String, self.on_imu_relay)

        rospy.Subscriber('/torso_arduino/odometry_relay', String,
                         self.on_odometry_relay)

        rospy.Subscriber('/torso_arduino/power_shutdown', Bool,
                         self.on_power_shutdown)

        self._odom_lock = threading.RLock()
        self.pos = None
        self.ori = None

        # self._motor_target_left = None
        # self._motor_target_right = None
        # rospy.Subscriber('/torso_arduino/motor_target_a', Int16, partial(self.on_motor_target, side='left'))
        # rospy.Subscriber('/torso_arduino/motor_target_b', Int16, partial(self.on_motor_target, side='right'))
        # self._motor_encoder_left = None
        # self._motor_encoder_right = None
        # rospy.Subscriber('/torso_arduino/motor_encoder_a', Int16, partial(self.on_motor_encoder, side='left'))
        # rospy.Subscriber('/torso_arduino/motor_encoder_b', Int16, partial(self.on_motor_encoder, side='right'))

        ## Begin IO.

        rospy.spin()
Example #23
    def __init__(self):
        self.br = tf.TransformBroadcaster()
        self.tf_send_divider = 10
        self.tf_send_count = 0
        self.sub_odom = rospy.Subscriber("/proc_navigation/odom", Odometry,
                                         self.odom_callback)
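odom_callback is not shown here; given tf_send_divider and tf_send_count, it presumably rebroadcasts every tenth odometry message as a transform to keep /tf traffic down. A hypothetical sketch of such a callback:

    def odom_callback(self, msg):
        # throttle: only rebroadcast every tf_send_divider-th message
        self.tf_send_count += 1
        if self.tf_send_count < self.tf_send_divider:
            return
        self.tf_send_count = 0
        p = msg.pose.pose.position
        q = msg.pose.pose.orientation
        self.br.sendTransform((p.x, p.y, p.z),
                              (q.x, q.y, q.z, q.w),
                              msg.header.stamp,
                              msg.child_frame_id,    # child frame
                              msg.header.frame_id)   # parent frame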
Example #24

x = 0.0
y = 0.0
theta = 0.0

vx = 0.0
vy = 0.0
vth = 0.0

dc = 0.0
dr = 0.0
dl = 0.0
dt = 0.0
dx = 0.0
dy = 0.0
dtheta = 0.0

if __name__ == '__main__':
    try:
        # rospy.Time.now() is only valid after init_node, so initialize first
        # (node name assumed; the original snippet omits it)
        rospy.init_node('encoder_odometry')
        current_time = rospy.Time.now()
        last_time = rospy.Time.now()

        odom_pub = rospy.Publisher("/odom", Odometry, queue_size=50)
        tick_sub = rospy.Subscriber("/TicksPublisher", EncoderTick,
                                    tickcallback)
        odom_broadcaster = tf.TransformBroadcaster()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
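tickcallback is not shown either; the globals above (dc, dr, dl, dtheta, ...) point at standard differential-drive dead reckoning. A hypothetical sketch with assumed message fields and constants (rospy and the globals come from the example):

from math import cos, sin

METERS_PER_TICK = 0.00052  # assumed encoder scale
WHEEL_BASE = 0.30          # assumed wheel separation in metres

def tickcallback(msg):
    global x, y, theta, dc, dr, dl, dtheta, current_time, last_time
    current_time = rospy.Time.now()
    last_time = current_time

    dr = msg.right_ticks * METERS_PER_TICK  # assumed EncoderTick fields
    dl = msg.left_ticks * METERS_PER_TICK
    dc = (dr + dl) / 2.0                    # distance moved by the centre
    dtheta = (dr - dl) / WHEEL_BASE         # change in heading

    x += dc * cos(theta + dtheta / 2.0)
    y += dc * sin(theta + dtheta / 2.0)
    theta += dtheta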
Example #25
    def __init__(self, reward=NullReward(),
                 namespace='/costar',
                 observe=None,
                 robot_config=None,
                 lfd=None,
                 tf_listener=None,
                 use_default_pose=False,
                 *args, **kwargs):
        super(CostarWorld, self).__init__(reward, *args, **kwargs)

        # This is the set of trajectory data we will use for learning later on.
        self.trajectories = {}

        # This is the set of object information we will use for learning later on
        # It tells us which objects/features each individual skill should depend on
        # and is used when extracting a set of features.
        self.objs = {}

        # This is extra data -- such as world state observations -- that is
        # associated with our training trajectories.
        self.trajectory_data = {}

        # This is where we actually store all the information about our learned
        # skills.
        self.models = {}

        # ------------------------- VISUALIZATION TOOLS ---------------------------
        # These are publishers for pose arrays that help us visualize data and
        # learned actions.
        self.traj_pubs = {}
        self.traj_data_pubs = {}
        self.skill_pubs = {}
        self.tf_listener = tf_listener
        if self.tf_listener is None:
            self.tf_listener = tf.TransformListener()


        # Other things
        self.observe = observe
        self.predicates = []
        self.namespace = namespace

        # This is the current state of all non-robot objects in the world --
        # which is to say, for now, it's just a dictionary of frames by TF frame
        # identifier.
        self.observation = {}

        # Object class information
        # TODO(cpaxton): this is a duplicate, remove it after state has been
        # fixed a little
        self.object_by_class = {}
        self.objects_to_track = []

        if robot_config is None:
            raise RuntimeError('Must provide a robot config!')
        elif not isinstance(robot_config, list):
            robot_config = [robot_config]

        # -------------------------- ROBOT INFORMATION ----------------------------
        # Base link, end effector, etc. for easy reference
        # set up actors and things
        self.tf_pub = tf.TransformBroadcaster()


        # Create and add all the robots we want in this world.
        for i, robot in enumerate(robot_config):

            name = robot['name']

            if robot['q0'] is not None:
                s0 = CostarState(i,
                        q=robot['q0'],
                        dq=np.zeros_like(robot['q0']))
            else:
                s0 = CostarState(self, i, None, None)
            self.addActor(
                CostarActor(robot,
                            state=s0,
                            dynamics=self.getT(robot),
                            policy=NullPolicy()))

        # -------------------------------------------------------------------------
        # For grippers. We check these on a _update_environment() to get the current gripper
        # state.
        self.gripper_status_listeners = {}

        # -------------------------------------------------------------------------
        # Visualization helper
        self.plan_pub = rospy.Publisher(
            join(self.namespace, "plan"),
            PoseArray,
            queue_size=1000)

        # The base link for the scene as a whole
        self.base_link = self.actors[0].base_link

        if not lfd:
            self.lfd = LfD(self.actors[0].config)
        else:
            self.lfd = lfd
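Taking an optional tf_listener, as this constructor does, is worth copying: every tf.TransformListener subscribes to /tf and fills its own buffer, so sharing one per process avoids duplicated traffic. The pattern in isolation (class name hypothetical):

import tf

class WorldBase(object):
    # minimal version of the injection pattern used above
    def __init__(self, tf_listener=None):
        # reuse the caller's listener when given, create one otherwise
        self.tf_listener = tf_listener or tf.TransformListener()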
Example #26
    def callbackPoseRCNN(self, data):
        # received detection array (rcnn_pose)
        objArray = data
        # rospy.logdebug(" lenth objArray.detections: %f", len(objArray.detections))
        size_filter = 10

        if len(objArray.detections) != 0:
            # align coordinate axis X
            neuralx_current = (
                -1) * objArray.detections[0].results[0].pose.pose.position.z
            neuraly_current = (
                -1) * objArray.detections[0].results[0].pose.pose.position.x
            neuralz_current = objArray.detections[0].results[
                0].pose.pose.position.y
            # rospy.logdebug("--------------------------------")
            # rospy.logdebug("rcnn_pose.x (m): %f", VecNeuralx_current)
            # rospy.logdebug("rcnn_pose.y (m): %f", VecNeuraly_current)
            # rospy.logdebug("rcnn_pose.z (m): %f", neuralz_current)

            ##############################################################

            # Filter list_x
            if len(self.list_x) < size_filter:
                self.list_x.append(neuralx_current)
                # rospy.logdebug("--------------------------------")
                # rospy.logdebug('Size of list: %f',len(self.list_x))
                # rospy.logdebug('****Incomplete List')
                self.VecNeural_x_previous = neuralx_current
            else:
                diff = abs(neuralx_current - self.VecNeural_x_previous)
                # rospy.logdebug('------------------------------')
                # rospy.logdebug('Diff points: %f',diff)

                if diff > 0.2 and self.VecNeural_x_previous != 0:
                    self.VecNeural_x_previous = neuralx_current
                    # self.VecNeural.x = 0
                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('****No capture!')

                else:
                    self.list_x.append(neuralx_current)
                    del self.list_x[0]
                    neuralx_current = sum(self.list_x) / len(self.list_x)
                    self.VecNeural.x = neuralx_current

                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('Size of list_x:  %f',len(self.list_x))
                    # rospy.logdebug('Sum List list_x: %f',sum(self.list_x))
                    # rospy.logdebug('Med List list_x: %f',self.VecNeural.x)

                    self.VecNeural_x_previous = neuralx_current

            ##############################################################

            # Filter list_y
            if len(self.list_y) < size_filter:
                self.list_y.append(neuraly_current)
                # rospy.logdebug("--------------------------------")
                # rospy.logdebug('Size of list: %f',len(self.list_y))
                # rospy.logdebug('****Incomplete List')
                self.VecNeural_y_previous = neuraly_current
            else:
                diff = abs(neuraly_current - self.VecNeural_y_previous)
                # rospy.logdebug('------------------------------')
                # rospy.logdebug('Diff points: %f',diff)

                if diff > 0.2 and self.VecNeural_y_previous != 0:
                    self.VecNeural_y_previous = neuraly_current
                    # self.VecNeural.y = 0
                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('****No capture!')

                else:
                    self.list_y.append(neuraly_current)
                    del self.list_y[0]
                    neuraly_current = sum(self.list_y) / len(self.list_y)
                    self.VecNeural.y = neuraly_current

                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('Size of list_y:  %f',len(self.list_y))
                    # rospy.logdebug('Sum List list_y: %f',sum(self.list_y))
                    # rospy.logdebug('Med List list_y: %f',self.VecNeural.y)

                    self.VecNeural_y_previous = neuraly_current

            ##############################################################

            # Filter list_z
            if len(self.list_z) < size_filter:
                self.list_z.append(neuralz_current)
                # rospy.logdebug("--------------------------------")
                # rospy.logdebug('Size of list: %f',len(self.list_z))
                # rospy.logdebug('****Incomplete List')
                self.VecNeural_z_previous = neuralz_current
            else:
                diff = abs(neuralz_current - self.VecNeural_z_previous)
                # rospy.logdebug('------------------------------')
                # rospy.logdebug('Diff points: %f',diff)

                if diff > 0.4 and self.VecNeural_z_previous != 0:
                    self.VecNeural_z_previous = neuralz_current
                    # self.VecNeural.z = 0
                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('****No capture!')

                else:
                    self.list_z.append(neuralz_current)
                    del self.list_z[0]
                    neuralz_current = sum(self.list_z) / len(self.list_z)
                    self.VecNeural.z = neuralz_current

                    # rospy.logdebug('------------------------------')
                    # rospy.logdebug('Size of list_z:  %f',len(self.list_z))
                    # rospy.logdebug('Sum List list_z: %f',sum(self.list_z))
                    # rospy.logdebug('Med List list_z: %f',self.VecNeural.z)

                    self.VecNeural_z_previous = neuralz_current

            self.rcnn_odom.header.stamp = rospy.Time.now()
            self.rcnn_odom.header.seq = self.Keyframe_rcnn

            # stabilize angles and align transformations
            explicit_quat = [
                self.OriAruco.x, self.OriAruco.y, self.OriAruco.z,
                self.OriAruco.w
            ]
            euler = tf.transformations.euler_from_quaternion(explicit_quat)
            # roll = euler[0]
            # pitch = euler[1]
            yaw = euler[2]

            # # since all odometry is 6DOF we'll need a quaternion created from yaw
            odom_quat = tf.transformations.quaternion_from_euler(0, 0, -yaw)

            # set the position
            self.rcnn_odom.pose.pose = Pose(self.VecNeural,
                                            Quaternion(*odom_quat))

            tf_rcnn_to_drone = tf.TransformBroadcaster()
            tf_rcnn_to_drone.sendTransform(
                (self.VecNeural.x, self.VecNeural.y, self.VecNeural.z),
                odom_quat, self.rcnn_odom.header.stamp, "rcnn_odom",
                self.PARENT_NAME)  #world

            self.Keyframe_rcnn += 1

            self.odom_rcnn_pub.publish(self.rcnn_odom)
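The x, y and z branches of this callback repeat one filter: a fixed-size moving average that rejects single-step jumps above a threshold. A sketch of the same logic factored into a helper (window size and thresholds as in the callback):

class AxisFilter(object):
    def __init__(self, size=10, max_jump=0.2):
        self.size = size
        self.max_jump = max_jump
        self.window = []
        self.previous = 0.0

    def update(self, value):
        if len(self.window) < self.size:
            self.window.append(value)   # still filling the window
            self.previous = value
            return None
        if abs(value - self.previous) > self.max_jump and self.previous != 0:
            self.previous = value       # jump: skip it, remember for next time
            return None
        self.window.append(value)
        del self.window[0]
        mean = sum(self.window) / len(self.window)
        self.previous = mean
        return mean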
Example #27
    def __init__(self, link_to_robot, tf_prefix):
        self.broadcaster = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()
        self.link_to_robot = link_to_robot
        self.odom_data = Odometry()
        self.tf_prefix = tf_prefix
Example #28
    def __init__(self):
        super(Subscriber, self).__init__()
        rospy.init_node('filter_node', anonymous=False, log_level=rospy.DEBUG)

        self.PARENT_NAME = rospy.get_param('~parent_name', "camera_odom")
        rospy.logdebug("%s is %s default %s",
                       rospy.resolve_name('~parent_name'), self.PARENT_NAME,
                       "camera_odom")

        self.kalman = Kalman(n_states=3, n_sensors=6)
        self.kalman.P *= 10
        self.kalman.R *= 0.02

        self.VecNeural = Vector3()
        self.VecAruco = Vector3()
        self.OriAruco = Quaternion(0, 0, 0, 1)

        self.Keyframe_aruco = 0
        self.Keyframe_rcnn = 0
        self.Keyframe_hybrid = 0

        self.aruco_odom = Odometry()
        self.aruco_odom.header.stamp = rospy.Time.now()
        self.aruco_odom.header.frame_id = "aruco_odom"
        self.aruco_odom.header.seq = self.Keyframe_aruco
        self.aruco_odom.child_frame_id = self.PARENT_NAME

        self.rcnn_odom = Odometry()
        self.rcnn_odom.header.stamp = rospy.Time.now()
        self.rcnn_odom.header.frame_id = "rcnn_odom"
        self.rcnn_odom.header.seq = self.Keyframe_rcnn
        self.rcnn_odom.child_frame_id = self.PARENT_NAME

        self.list_x = []
        self.list_y = []
        self.list_z = []

        self.VecNeural_x_previous = 0
        self.VecNeural_y_previous = 0
        self.VecNeural_z_previous = 0

        self.list_kalma_x = []
        self.list_kalma_y = []
        self.list_kalma_z = []

        self.filtered = Vector3()

        # Publishers
        self.pub_hybrid = rospy.Publisher('kalman/hybrid_raw',
                                          Vector3,
                                          queue_size=1)
        self.pub_hybrid_filtred = rospy.Publisher('kalman/hybrid_filtred',
                                                  Vector3,
                                                  queue_size=1)
        self.odom_filter_pub = rospy.Publisher("odom_filter",
                                               Odometry,
                                               queue_size=1)
        self.odom_rcnn_pub = rospy.Publisher("odom_rcnn",
                                             Odometry,
                                             queue_size=1)
        self.odom_aruco_pub = rospy.Publisher("odom_aruco",
                                              Odometry,
                                              queue_size=1)

        rospy.Subscriber("rcnn/objects",
                         Detection2DArray,
                         self.callbackPoseRCNN,
                         queue_size=1)
        rospy.Subscriber("aruco_double/pose",
                         Pose,
                         self.callbackPoseAruco,
                         queue_size=1)

        r = rospy.Rate(60.0)
        while not rospy.is_shutdown():
            neural = [self.VecNeural.x, self.VecNeural.y, self.VecNeural.z]
            aruco = [self.VecAruco.x, self.VecAruco.y, self.VecAruco.z]

            mat = np.matrix(np.concatenate((neural, aruco), axis=None)).getT()

            if self.kalman.first:
                # insert the first 3 values of the vector
                self.kalman.x = mat[0:3, :]
                self.kalman.first = False

            current_time = rospy.Time.now()
            dt_aruco = (current_time - self.aruco_odom.header.stamp).to_sec()
            dt_rcnn = (current_time - self.rcnn_odom.header.stamp).to_sec()

            # module_rcnn = math.sqrt(abs(mat[3]**2)+abs(mat[4]**2)+abs(mat[5]**2))
            # module_aru = math.sqrt(abs(mat[0]**2)+abs(mat[1]**2)+abs(mat[2]**2))

            # rospy.logdebug("------------------------")
            # rospy.logdebug("module_aru : %f", module_aru)
            # rospy.logdebug("module_rcnn : %f", module_rcnn)

            ##################################################################################

            # rcnn = 0
            if mat[2] == 0 or dt_rcnn > 5.0:
                covNeural = 10
                covAruco = 0.01 * abs(self.kalman.x[2]) + 1.0
                # rospy.logdebug("*****Neural = 0 ou stoped!*****")

            # aruco = 0
            elif mat[5] == 0 or dt_aruco > 5.0:
                covNeural = (1.5 / (abs(self.kalman.x[2]) + 0.1)) + 1.0
                covAruco = 10
                # rospy.logdebug("*****aruco = 0 ou stoped!*****")

            else:
                # greater neural error and lower aruco error at low height
                covNeural = (0.05 / (abs(self.kalman.x[2]) + 0.3)) + 0.45
                covAruco = 0.05 * abs(self.kalman.x[2]) + 0.4
                # rospy.logdebug("*****all-run!*****")

            ##################################################################################

            # module_aru_ant = module_aru
            # module_rcnn_ant = module_rcnn

            # set values of cov in R matrix
            arrayNeural = np.full((1, 3), covNeural, dtype=float)
            arrayAruco = np.full((1, 3), covAruco, dtype=float)
            Zarray = np.concatenate((arrayNeural, arrayAruco), axis=None)
            self.kalman.R = np.diag(Zarray)

            # rospy.logdebug("arrayNeral : %f", covNeural)
            # rospy.logdebug("arrayAruco : %f", covAruco)
            # rospy.logdebug("------------------------")

            self.kalman.predict()
            self.kalman.update(mat)

            vec = Vector3()
            vec.x = self.kalman.x[0]
            vec.y = self.kalman.x[1]
            vec.z = self.kalman.x[2]

            # rospy.logdebug("------------------------")
            # rospy.logdebug("kalman.sensor[1].x : %f", vec.x)
            # rospy.logdebug("kalman.sensor[1].y : %f", vec.y)
            # rospy.logdebug("kalman.sensor[1].z : %f", vec.z)

            size_filter_kalman_high = 20

            # Filter list_kalma_x
            if len(self.list_kalma_x) < size_filter_kalman_high:
                self.list_kalma_x.append(vec.x)
            else:
                mean_filter = sum(self.list_kalma_x) / len(self.list_kalma_x)
                if abs(mean_filter - vec.x) > 0.05:
                    self.list_kalma_x.append(vec.x)
                    del self.list_kalma_x[0]
                    self.filtered.x = sum(self.list_kalma_x) / len(
                        self.list_kalma_x)

            # Filter list_kalma_y
            if len(self.list_kalma_y) < size_filter_kalman_high:
                self.list_kalma_y.append(vec.y)
            else:
                mean_filter = sum(self.list_kalma_y) / len(self.list_kalma_y)
                if abs(mean_filter - vec.y) > 0.05:
                    self.list_kalma_y.append(vec.y)
                    del self.list_kalma_y[0]
                    self.filtered.y = sum(self.list_kalma_y) / len(
                        self.list_kalma_y)

            # Filter list_kalma_z
            if len(self.list_kalma_z) < size_filter_kalman_high:
                self.list_kalma_z.append(vec.z)  # was vec.y: copy-paste bug
            else:
                mean_filter = sum(self.list_kalma_z) / len(self.list_kalma_z)
                if abs(mean_filter - vec.z) > 0.05:
                    self.list_kalma_z.append(vec.z)
                    del self.list_kalma_z[0]
                    self.filtered.z = sum(self.list_kalma_z) / len(
                        self.list_kalma_z)

            ##################################################################################
            if dt_aruco < 0.1 or dt_rcnn < 0.1:
                hybrid_odom = Odometry()
                hybrid_odom.header.stamp = rospy.Time.now()
                hybrid_odom.header.frame_id = "hybrid_odom"
                hybrid_odom.header.seq = self.Keyframe_hybrid
                hybrid_odom.child_frame_id = self.PARENT_NAME

                explicit_quat = [
                    self.OriAruco.x, self.OriAruco.y, self.OriAruco.z,
                    self.OriAruco.w
                ]
                euler = tf.transformations.euler_from_quaternion(explicit_quat)
                # roll = euler[0]
                # pitch = euler[1]
                yaw = euler[2]

                # # since all odometry is 6DOF we'll need a quaternion created from yaw
                odom_quat = tf.transformations.quaternion_from_euler(
                    0, 0, -yaw)

                # set the position
                hybrid_odom.pose.pose = Pose(self.filtered,
                                             Quaternion(*odom_quat))

                # transform tf
                tf_hybrid_to_drone = tf.TransformBroadcaster()
                tf_hybrid_to_drone.sendTransform(
                    (self.filtered.x, self.filtered.y, self.filtered.z),
                    odom_quat, hybrid_odom.header.stamp, "hybrid_odom",
                    self.PARENT_NAME)  #world

                ##################################################################################

                self.Keyframe_hybrid += 1

                # publish the message
                self.odom_filter_pub.publish(hybrid_odom)
                self.pub_hybrid_filtred.publish(self.filtered)

            # publish the hybrid raw
            self.pub_hybrid.publish(vec)

            r.sleep()

        try:
            rospy.spin()
        except rospy.ROSInterruptException:
            print("Shutting down")
Example #29
    def broadcast_transform(self, name, position, orientation):
        br = tf.TransformBroadcaster()
        br.sendTransform(position, orientation, rospy.Time.now(), name,
                         "world")
def detect_feature_callback(req):
    # print "----------------------------------------------"
    # print "detect_feature_callback: "
    # print "  camera frame_id [%s] :"%(req.cameraPose.header.frame_id)
    # print "  camera translation from map [%.4f,%.4f,%.4f] : "%(req.cameraPose.pose.position.x,req.cameraPose.pose.position.y,req.cameraPose.pose.position.z)
    # print "  camera orientation (quaternion) [%.4f,%.4f,%.4f,%.4f] : "%(req.cameraPose.pose.orientation.x,req.cameraPose.pose.orientation.y,req.cameraPose.pose.orientation.z,req.cameraPose.pose.orientation.w)
    # print "  feature algorithm [%s] : "%(req.visualFeature.algorithm)
    # print "  feature id [%d] : "%(req.visualFeature.id)
    # print '  feature frame_id : %s_%s%s' % (req.cameraPose.header.frame_id, algorithm_abreviations[req.visualFeature.algorithm], req.visualFeature.id)
    # print "  feature translation [%.4f,%.4f,%.4f] : "%(req.visualFeature.pose.pose.position.x, req.visualFeature.pose.pose.position.y, req.visualFeature.pose.pose.position.z)
    # print "  feature orientation (quaternion) [%.4f,%.4f,%.4f,%.4f] "%(req.visualFeature.pose.pose.orientation.x, req.visualFeature.pose.pose.orientation.y, req.visualFeature.pose.pose.orientation.z, req.visualFeature.pose.pose.orientation.w)

    cN = req.cameraPose.header.frame_id

    if req.visualFeature.id not in features_present:
        print "detect_feature_callback: camera frame_id [%s] : feature id [%d] : feature is not present - is a false positive - not listing as a detection." % (
            req.cameraPose.header.frame_id, req.visualFeature.id)
        response = DetectedFeatureResponse()
        response.acknowledgement = "feature not present"
        return response

    # TODO - something about this conversion is not right: the translation and quaternion match those found by AprilTags_Kaess and sent by DetectedFeatureClient, but the RPY are different
    euler = tf.transformations.euler_from_quaternion([
        req.visualFeature.pose.pose.orientation.x,
        req.visualFeature.pose.pose.orientation.y,
        req.visualFeature.pose.pose.orientation.z,
        req.visualFeature.pose.pose.orientation.w
    ])
    print "  feature x_y_z_w roll=%.4f pitch=%.4f yaw=%.4f" % (
        euler[0], euler[1], euler[2])

    tfBroadcaster = tf.TransformBroadcaster()
    time_now = rospy.Time.now()

    # publish the map-to-camera_pose tf
    tfBroadcaster.sendTransform(
        (req.cameraPose.pose.position.x, req.cameraPose.pose.position.y,
         req.cameraPose.pose.position.z),
        (req.cameraPose.pose.orientation.x, req.cameraPose.pose.orientation.y,
         req.cameraPose.pose.orientation.z, req.cameraPose.pose.orientation.w),
        time_now,
        req.cameraPose.header.
        frame_id,  # to      '/cam/%s/pose' % req.cameraPose.header.frame_id e.g. "/cam/c_1/pose"    # TODO - remove hardcoding to base namespace
        'map')  # from frame

    # publish the map-to-camera_pose tf
    tfBroadcaster.sendTransform(
        (req.cameraPose.pose.position.x, req.cameraPose.pose.position.y,
         req.cameraPose.pose.position.z),
        (req.cameraPose.pose.orientation.x, req.cameraPose.pose.orientation.y,
         req.cameraPose.pose.orientation.z, req.cameraPose.pose.orientation.w),
        time_now,
        req.cameraPose.header.frame_id +
        '_in_detect_feature_req_header',  # to      '/cam/%s/pose' % req.cameraPose.header.frame_id e.g. "/cam/c_1/pose"    # TODO - remove hardcoding to base namespace
        'map')  # from frame

    # create the robot-convention camera body frame, step 1 : +90Y
    tfBroadcaster.sendTransform(
        (0.0, 0.0, 0.0),
        (
            0.0, 0.7071, 0.0, 0.7071
        ),  #  http://www.euclideanspace.com/maths/geometry/rotations/conversions/eulerToQuaternion/steps/index.htm
        time_now,
        req.cameraPose.header.frame_id + 'y',  # to
        req.cameraPose.header.frame_id)  # from

    # create the robot-convention camera body frame, step 2 : +90Z
    tfBroadcaster.sendTransform(
        (0.0, 0.0, 0.0),
        (
            0.0, 0.0, 0.7071, 0.7071
        ),  #  http://www.euclideanspace.com/maths/geometry/rotations/conversions/eulerToQuaternion/steps/index.htm
        time_now,
        req.cameraPose.header.frame_id + 'yz',  # to
        req.cameraPose.header.frame_id + 'y')  # from

    camera_tag_frame_id = '%s_%s%s' % (
        req.cameraPose.header.frame_id,
        algorithm_abreviations[req.visualFeature.algorithm],
        req.visualFeature.id)

    t55 = '%s%s' % (algorithm_abreviations[req.visualFeature.algorithm],
                    req.visualFeature.id)

    # THIS IS THE GOOD TAG POSITION, AND GOOD TAG ORIENTATION BUT THE TAG FACES AWAY FROM THE CAMERA
    # INTIIALLY GOOD AND ALIGNED WITH c2_from_fixed_t55 BUT THEN ROTATES STRANGELY IF THE CAMERA ROTATES
    t55_from_c2 = '%s%s_from_%s' % (
        algorithm_abreviations[req.visualFeature.algorithm],
        req.visualFeature.id, req.cameraPose.header.frame_id)
    tfBroadcaster.sendTransform(
        (req.visualFeature.pose.pose.position.x,
         req.visualFeature.pose.pose.position.y,
         req.visualFeature.pose.pose.position.z),
        #(req.visualFeature.pose.pose.orientation.x, req.visualFeature.pose.pose.orientation.y, req.visualFeature.pose.pose.orientation.z, req.visualFeature.pose.pose.orientation.w),
        (req.visualFeature.pose.pose.orientation.z,
         -req.visualFeature.pose.pose.orientation.x,
         -req.visualFeature.pose.pose.orientation.y,
         req.visualFeature.pose.pose.orientation.w),
        time_now,
        t55_from_c2,  # to   tag-from-camera-body t55_from_c2
        req.cameraPose.header.frame_id)  # from camera-body c2

    # GOOD:  t55_from_c2_180x
    # GOOD with translationRotationWithAxisChange --> tagDetection.getRelativeTranslationRotation
    # Problem is the once-off localisation against the fixed tag
    #  THIS IS THE GOOD TAG POSE, with the visible tag facing toward the camera
    #  INTIIALLY GOOD  BUT THEN ROTATES STRANGELY WITH t55_from_c2 IF THE CAMERA ROTATES
    t55_from_c2_180x = t55_from_c2 + '_180x'
    tfBroadcaster.sendTransform(
        (0.0, 0.0, 0.0),
        (1.0, 0.0, 0.0, 0.0),
        time_now,
        t55_from_c2_180x,  # to
        t55_from_c2)  # from

    # simplify the below (c2_from_t55_from_c2_180x is GOOD, but maybe simplify)
    quat_t55_from_c2_plain = [
        req.visualFeature.pose.pose.orientation.x,
        req.visualFeature.pose.pose.orientation.y,
        req.visualFeature.pose.pose.orientation.z,
        req.visualFeature.pose.pose.orientation.w
    ]
    quat_c2_from_t55_plain = inverse(quat_t55_from_c2_plain)
    fixed_t55 = 'fixed_%s' % (t55)

    # GOOD:  c2_from_t55_from_c2_180x
    # GOOD with translationRotationWithAxisChange --> tagDetection.getRelativeTranslationRotation
    # Problem is the once-off localisation against the fixed tag
    # NOTE 1:  t55 from c2  is  [ +z -x -y +w ]
    # NOTE 2:  c2 from t55  is  inverse( [ +z -x -y +w ] )
    quat_t55_from_c2 = [
        req.visualFeature.pose.pose.orientation.z,
        -req.visualFeature.pose.pose.orientation.x,
        -req.visualFeature.pose.pose.orientation.y,
        req.visualFeature.pose.pose.orientation.w
    ]
    quat_c2_from_t55 = inverse(quat_t55_from_c2)
    c2_from_t55_from_c2_180x = '%s_from_%s' % (cN, t55_from_c2_180x)
    c2_from_t55_from_c2_180x_tmp = c2_from_t55_from_c2_180x + '_tmp'
    tfBroadcaster.sendTransform(
        #(req.visualFeature.pose.pose.position.x, req.visualFeature.pose.pose.position.y, -req.visualFeature.pose.pose.position.z),
        (0.0, 0.0, 0.0),
        quat_c2_from_t55,
        time_now,
        c2_from_t55_from_c2_180x_tmp,  # to
        t55_from_c2)  # from
    tfBroadcaster.sendTransform(
        (-req.visualFeature.pose.pose.position.x,
         -req.visualFeature.pose.pose.position.y,
         -req.visualFeature.pose.pose.position.z),
        (0.0, 0.0, 0.0, 1.0),
        time_now,
        c2_from_t55_from_c2_180x,  # to
        c2_from_t55_from_c2_180x_tmp)  # from

    #  from fixed tag to camera
    t55_mirrored = t55 + '_mirrored'  # tag is rot+-180Z from robot pov: AprilTags gives tag pose as away from camera i.e. tag x-axis is into the tag / robot-convention frame aligned with back of tag, I treat tag x-axis as out of the tag / robot-convention axes aligned with face of tag
    #   t55 = '%s%s'%(algorithm_abreviations[req.visualFeature.algorithm], req.visualFeature.id)
    c2_from_fixed_t55 = '%s_from_fixed_%s' % (cN, t55)
    c2_from_fixed_t55_tmp = c2_from_fixed_t55 + '_tmp'
    c2_from_fixed_t55_tmp180x = c2_from_fixed_t55 + '_tmp180x'

    tfBroadcaster.sendTransform(
        (
            0.0, 0.0, 0.0
        ),  # same position: _t55_tmp180x is the same position as t55 / t55 ...
        (1.0, 0.0, 0.0, 0.0),  # 180 around x:  ... but rotated 180 around x
        time_now,  # simultaneous 'now'
        c2_from_fixed_t55_tmp180x,  # to
        t55)  # from
    tfBroadcaster.sendTransform(
        #(req.visualFeature.pose.pose.position.x, req.visualFeature.pose.pose.position.y, -req.visualFeature.pose.pose.position.z),
        (0.0, 0.0, 0.0),  # _tmp is same position as the fixed tag ...
        quat_c2_from_t55,  # ... but inverse quaternion to point back toward the camera
        time_now,  # simultaneous 'now'
        c2_from_fixed_t55_tmp,  # to
        c2_from_fixed_t55_tmp180x)  # from
    tfBroadcaster.sendTransform(
        (-req.visualFeature.pose.pose.position.x,
         -req.visualFeature.pose.pose.position.y,
         -req.visualFeature.pose.pose.position.z
         ),  # inverse/reversed translation from fixed tag ...
        (0.0, 0.0, 0.0, 1.0),  # ... same orientation as the _tmp
        time_now,  # simultaneous 'now'
        c2_from_fixed_t55,  # to
        c2_from_fixed_t55_tmp)  # from
    axisMarker(req.visualFeature.id, c2_from_fixed_t55)

    tfBroadcaster.sendTransform(
        (0, 0, 0), (req.visualFeature.pose.pose.orientation.x,
                    req.visualFeature.pose.pose.orientation.y,
                    req.visualFeature.pose.pose.orientation.z,
                    req.visualFeature.pose.pose.orientation.w), time_now,
        camera_tag_frame_id, camera_tag_frame_id + 't'
    )  # from      '/cam/%s/pose' % req.cameraPose.header.frame_id e.g. "/cam/c_1/pose"    # TODO - remove hardcoding to base namespace

    tag_label = '%s%s' % (algorithm_abreviations[req.visualFeature.algorithm],
                          req.visualFeature.id)

    print camera_tag_frame_id + '_mirrored_translation', ': from camera to tag: x=', req.visualFeature.pose.pose.position.z, ', y=', -req.visualFeature.pose.pose.position.x, ', z=', -req.visualFeature.pose.pose.position.y
    print camera_tag_frame_id + '_mirrored_translation', ': from tag to camera: x=', req.visualFeature.pose.pose.position.z, ', y=', -req.visualFeature.pose.pose.position.x, ', z=', req.visualFeature.pose.pose.position.y

    for fixed_feature in fixed_features:
        tfBroadcaster.sendTransform(
            (fixed_feature.pose.position.x, fixed_feature.pose.position.y,
             fixed_feature.pose.position.z),
            (fixed_feature.pose.orientation.x,
             fixed_feature.pose.orientation.y,
             fixed_feature.pose.orientation.z,
             fixed_feature.pose.orientation.w), time_now,
            '%s%s' % (algorithm_abreviations[fixed_feature.algorithm],
                      fixed_feature.id), 'map')
        tfBroadcaster.sendTransform(
            (fixed_feature.pose.position.x, fixed_feature.pose.position.y,
             fixed_feature.pose.position.z),
            (fixed_feature.pose.orientation.x,
             fixed_feature.pose.orientation.y,
             fixed_feature.pose.orientation.z,
             fixed_feature.pose.orientation.w), time_now,
            'fixed_%s%s' % (algorithm_abreviations[fixed_feature.algorithm],
                            fixed_feature.id), 'map')

    if 'c3' == cN:
        try:
            # LATER; 'c1_from_fixed_t32'
            listener.waitForTransform(
                'map', 't50_from_c1_180x', rospy.Time(), rospy.Duration(4.0)
            )  # from the map origin, to the camera, through a fixed point
        except tf.Exception:
            print 'ERROR ------ : no transform from %s to %s ' % (
                'map', 't50_from_c1_180x')

    response = DetectedFeatureResponse()
    response.acknowledgement = "bob"
    return response
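The inverse() helper applied to quaternions above is not defined in this excerpt. For unit quaternions the inverse is simply the conjugate, which is presumably what it computes; tf.transformations.quaternion_inverse offers the same. A sketch:

def inverse(q):
    # inverse of a unit quaternion [x, y, z, w] is its conjugate
    x, y, z, w = q
    return [-x, -y, -z, w]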