Example #1
import rospy
import tf
import tf2_ros
import tf2_kdl
import tf_conversions


def get_transform(reference_frame, target_frame):
    listener = tf.TransformListener()
    try:
        listener.waitForTransform(reference_frame,
                                  target_frame,
                                  rospy.Time(0),
                                  timeout=rospy.Duration(1))
        translation_rotation = listener.lookupTransform(
            reference_frame, target_frame, rospy.Time())
    except Exception as e1:
        try:
            tf_buffer = tf2_ros.Buffer()
            tf2_listener = tf2_ros.TransformListener(tf_buffer)
            transform_stamped = tf_buffer.lookup_transform(
                reference_frame,
                target_frame,
                rospy.Time(0),
                timeout=rospy.Duration(1))
            translation_rotation = tf_conversions.toTf(
                tf2_kdl.transform_to_kdl(transform_stamped))
        except Exception as e2:
            rospy.logerr("get_transfrom::\n " +
                         "Failed to find transform from %s to %s" % (
                             reference_frame,
                             target_frame,
                         ))
    return translation_rotation
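A hedged usage sketch for the function above; the node and frame names are placeholders, and a running ROS master plus a TF tree containing both frames is assumed:

# Hypothetical usage; 'base_link' and 'tool0' stand in for real frames.
rospy.init_node('transform_query_example')
result = get_transform('base_link', 'tool0')
if result is not None:
    (trans, rot) = result  # ((x, y, z), (qx, qy, qz, qw))
    rospy.loginfo('translation: %s rotation: %s', str(trans), str(rot))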
Example #2
    def get_frame_offset(self, oframe1, oframe2):
        tf_done = False
        tfl = tf.TransformListener()

        # Block until the oframe1 -> oframe2 transform becomes available.
        while not tf_done:
            try:
                (trans1, rot1) = tfl.lookupTransform(oframe1, oframe2, rospy.Time(0))
                tf_done = True
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue

        
        tf_done = False

        # Block until the world -> self.frame transform becomes available.
        while not tf_done:
            try:
                (trans2, rot2) = tfl.lookupTransform("/world", self.frame, rospy.Time(0))
                tf_done = True
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue

        print(trans1, rot1)
        print(trans2, rot2)

        f1 = tfc.fromTf((trans1, rot1))
        f2 = tfc.fromTf((trans2, rot2))

        frame = f2 * f1  # chain (world -> self.frame) * (oframe1 -> oframe2); assumes self.frame == oframe1

        self.transform = tfc.toTf(frame)

        tfb = tf.TransformBroadcaster()
        (t, r) = self.transform
        tfb.sendTransform(t, r, rospy.Time.now(), "EX", "/world")
        self.frame = "EX"
Example #3
import rospy
import tf
import tf_conversions as tfc

# Module-level flags referenced below; defaults assumed to keep the snippet runnable.
verbose = False
flipped_frames_allowed = False


def get_frames(num, step, trans, rot, frame=None, off=0, suffix=""):

    f = tfc.fromTf((trans, rot))

    bc = tf.TransformBroadcaster()

    names = []

    for i in range(1, num+1):
        r = tfc.Rotation(f.M)

        (roll, pitch, yaw) = r.GetRPY()
        r = tfc.Rotation.RPY(roll, pitch, yaw + step)

        rf = tfc.Frame(tfc.Rotation.RotZ(step))
        rfz = tfc.Frame(tfc.Rotation.RPY(0,3.14159,0))

        p = rf * tfc.Vector(f.p)
        p2 = tfc.Vector(-1*p.x(), -1*p.y(), -1*p.z())

        r2 = tfc.Rotation.RPY(roll + 3.14159, pitch, (yaw + step))

        f = tfc.Frame(r, p)
        f2 = tfc.Frame(r2, p2)

        (trans2, rot2) = tfc.toTf(f)
        (trans3, rot3) = tfc.toTf(f2)

        if verbose:
            print (trans2, rot2)
            if flipped_frames_allowed:
                print (trans3, rot3)

        if frame is not None:
            bc.sendTransform(trans2, rot2, rospy.Time.now(),
                             frame + suffix + "_gen" + str(off+i), frame)
            names.append(frame + suffix + "_gen" + str(off+i))

            if flipped_frames_allowed:
                bc.sendTransform(trans3, rot3, rospy.Time.now(),
                                 frame + suffix + "_flip" + str(off+i), frame)
                names.append(frame + suffix + "_flip" + str(off+i))

        if verbose:
            print "---" + str(i) + "---"

    return names
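A hedged usage sketch for get_frames; the parent frame name and pose values are illustrative:

import math

# Hypothetical call: broadcast 8 frames, each rotated a further 45 degrees
# about Z, parented to 'ar_marker_0' (assumed to exist in the TF tree).
rospy.init_node('frame_generator_example')
names = get_frames(8, math.pi / 4,
                   (0.3, 0.0, 0.2),       # translation of the seed frame
                   (0.0, 0.0, 0.0, 1.0),  # identity quaternion
                   frame='ar_marker_0')
print(names)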
Example #4
    def update(self):
        if not self.calibrated:
            try:
                T_camera_to_marker = tf_c.fromTf(
                    self.tf_listen.lookupTransform('camera_link',
                                                   'ar_marker_0',
                                                   rospy.Time(0)))
                T_base_endpoint = tf_c.fromTf(
                    self.tf_listen.lookupTransform('base_link', 'endpoint',
                                                   rospy.Time(0)))
                T_base_camera_current = tf_c.fromTf(
                    self.tf_listen.lookupTransform('base_link', 'camera_link',
                                                   rospy.Time(0)))
                T_endpoint_marker = tf_c.fromTf(
                    self.tf_listen.lookupTransform('endpoint',
                                                   'endpoint_marker',
                                                   rospy.Time(0)))

                T_base_camera = (T_base_endpoint * T_endpoint_marker *
                                 T_camera_to_marker.Inverse())

                # Extract position and rotation for the new transformation
                xyz = tf_c.toTf(T_base_camera)[0]
                rpy = T_base_camera.M.GetRPY()

                # Call to service ###########
                # First check to see if service exists
                try:
                    rospy.wait_for_service(
                        '/semi_static_transform_publisher/UpdateTransform')
                except rospy.ROSException as e:
                    rospy.logerr(
                        'Could not find semi-static transform service')
                    return
                # Make servo call to set pose
                try:
                    # Set up the transform to send to the semi-static node
                    update_transform_proxy = rospy.ServiceProxy(
                        '/semi_static_transform_publisher/UpdateTransform',
                        UpdateTransform)
                    msg = UpdateTransformRequest()
                    msg.x, msg.y, msg.z = xyz
                    msg.roll, msg.pitch, msg.yaw = rpy

                    # Call to service
                    update_transform_proxy(msg)

                    self.calibrated = True
                except rospy.ServiceException as e:
                    rospy.logwarn('There was a problem with the service:')
                    rospy.logwarn(e)

            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                rospy.logwarn(e)
                return
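The transform chain above computes the camera pose as T_base_camera = T_base_endpoint * T_endpoint_marker * T_camera_marker^-1: composing base->endpoint->marker gives the marker in the base frame, and the inverted camera->marker transform carries that back to the camera. A quick PyKDL sanity check of the identity, with synthetic frames (all values made up):

import PyKDL

T_base_endpoint = PyKDL.Frame(PyKDL.Rotation.RotZ(0.5), PyKDL.Vector(0.4, 0.0, 0.3))
T_endpoint_marker = PyKDL.Frame(PyKDL.Vector(0.0, 0.0, 0.05))
T_camera_marker = PyKDL.Frame(PyKDL.Rotation.RotY(0.2), PyKDL.Vector(0.1, 0.0, 0.8))

T_base_camera = T_base_endpoint * T_endpoint_marker * T_camera_marker.Inverse()

# Both sides should give the marker pose expressed in the base frame.
assert PyKDL.Equal(T_base_camera * T_camera_marker,
                   T_base_endpoint * T_endpoint_marker, 1e-9)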
Example #5
    def update(self):

        all_frames = self.listener_.getFrameStrings()
        ar_frames = [f for f in all_frames if f.find('ar_marker')>=0]
        un_filtered = [f for f in ar_frames if f.find('filtered')<0]

        # Decay each frame count at every timestep
        for f in list(self.frames.keys()):  # copy keys; entries may be popped inside the loop
            self.frames[f]['no_frames'] -= 1
            if self.frames[f]['no_frames'] <= 0:
                short_name = 'landmark_' + f.split('_')[-1]
                rospy.delete_param('instructor_landmark/' + short_name)
                rospy.logwarn('Deleted: ' + short_name)
                self.frames.pop(f)

        for frame in un_filtered:
            try:
                F = tf_c.fromTf(self.listener_.lookupTransform('/world',frame,rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException) as e:
                rospy.logerr('Frame ['+frame+'] not found')
                return
            except tf.ExtrapolationException as e:
                # The frame we're seeing is old
                return

            if frame not in self.frames:
                self.frames[frame] = {'no_frames':0, 'poses':[], 'average_pose':None}
                rospy.logwarn('New Frame:' + frame)

            # rospy.logwarn(self.frames[frame]['no_frames'])
            if self.frames[frame]['no_frames'] < self.buffer:
                self.frames[frame]['no_frames'] += 2
                self.frames[frame]['poses'].append(F)
            else:
                self.frames[frame]['poses'].pop(0)
                self.frames[frame]['poses'].pop(0)
                self.frames[frame]['poses'].append(F)

        for frame in self.frames.keys():
            # Get all stored frame positions/rotations
            sum_xyz = [tf_c.toTf(f)[0] for f in self.frames[frame]['poses']]
            sum_rpy = [f.M.GetRPY() for f in self.frames[frame]['poses']]

            if len(sum_xyz) > 2:
                xyz = np.mean(sum_xyz, 0)
                rpy = np.mean(sum_rpy, 0)

                F_avg = PyKDL.Frame()
                F_avg.p = PyKDL.Vector(*xyz)
                F_avg.M = PyKDL.Rotation.RPY(*rpy)

                self.broadcaster_.sendTransform(tuple(F_avg.p),
                                                tuple(F_avg.M.GetQuaternion()),
                                                rospy.Time.now(),
                                                '/filtered/' + frame, '/world')
                # rosparam_marker_name = "instructor_landmark/{}".format(str('filtered/'+frame))
                # rospy.set_param(rosparam_marker_name, str('filtered/'+frame))
                short_name = 'landmark_' + frame.split('_')[-1]
                rospy.set_param('instructor_landmark/' + short_name,
                                str('filtered/' + frame))
Example #6
    def get_transform(self, reference_frame, target_frame):
        try:
            transform_stamped = self.tfBuffer.lookup_transform(
                reference_frame, target_frame, rospy.Time(0),
                rospy.Duration(1.0))
            translation_rotation = tf_conversions.toTf(
                tf2_kdl.transform_to_kdl(transform_stamped))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logerr('Failed to get transform from %s to %s: %s' %
                         (reference_frame, target_frame, e))
            return None
        return translation_rotation
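The method above assumes the class already owns a tf2 buffer and listener; a minimal hedged setup sketch (the class name is hypothetical):

import rospy
import tf2_ros
import tf2_kdl
import tf_conversions

class TransformHelper(object):  # hypothetical host class for get_transform
    def __init__(self):
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)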
Example #7
    def get_most_recent_transform(self):

        left_pose, left_time = self.left_arm_tf_updater.current_pose_and_time
        right_pose, right_time = self.right_arm_tf_updater.current_pose_and_time

        if right_time > left_time:
            pose = right_pose
            camera_transform = self.right_arm_tf_updater.camera_transform
        else:
            pose = left_pose
            camera_transform = self.left_arm_tf_updater.camera_transform
       
        transform = None
        if pose:
            checkerboard_in_camera = tf_conversions.toMatrix(tf_conversions.fromMsg(pose))
            checkerboard_in_body = camera_transform * checkerboard_in_camera
            checkerboard_in_body_tf = tf_conversions.toTf(tf_conversions.fromMatrix(checkerboard_in_body))
            checkerboard_rpy = tf.transformations.euler_from_quaternion(checkerboard_in_body_tf[1])
            transform = checkerboard_in_body_tf[0] + checkerboard_rpy

        return transform
Example #8
    def cb(self, msg):
        pose = Pose(orientation=msg.orientation)
        _, q = tf_conversions.toTf(tf_conversions.fromMsg(pose))
        rpy = tf.transformations.euler_from_quaternion(q)
        print(rpy)
        self._data.append(rpy)
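A hedged wiring sketch for the callback above; the topic name and message type are assumptions (any message carrying an 'orientation' quaternion would work):

import rospy
from sensor_msgs.msg import Imu  # assumed: provides an 'orientation' field

# 'recorder' stands for an instance of the class that defines cb().
rospy.init_node('rpy_logger_example')
rospy.Subscriber('/imu/data', Imu, recorder.cb)
rospy.spin()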
Example #9
def flip_rotation_frame(trans, rot):
    # Keep the translation of the TF pose but invert its rotation.
    f = tfc.fromTf((trans, rot))
    ir = f.M.Inverse()

    return tfc.toTf(tfc.Frame(ir, f.p))
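A quick illustrative call of the helper above (values made up):

trans, rot = (1.0, 2.0, 3.0), (0.0, 0.0, 0.7071068, 0.7071068)  # 90 deg about Z
flipped = flip_rotation_frame(trans, rot)
print(flipped)  # same translation, rotation inverted (-90 deg about Z)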
Example #10
    def update(self):
        # Return if the calibration is finished
        if not self.update_poses:
            return

        # Get all current TF frames. Add any new ones to AR tag list.
        current_frames = self.tf_listen.getFrameStrings()
        current_frames = [
            f for f in current_frames
            if "ar_marker" in f and not ("filtered" in f)
        ]
        # current_frames = [f for f in current_frames if "ar_marker" in f and "filtered" in f]
        for f in current_frames:
            # Detect which camera the marker is sent from
            for c in range(self.n_cameras):
                if self.cameras[c] in f:
                    camera = c
                    break
            else:
                # Frame name matches no known camera; skip it.
                continue

            # Find tag name, camera frame, and pose
            fid = f[f.find("ar_marker_") + len("ar_marker") + 1:len(f)]
            camera_link = "{}_link".format(self.cameras[camera])

            try:
                ret = tf_c.fromTf(
                    self.tf_listen.lookupTransform(camera_link, f,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                print("Can't get TF camera->marker", camera_link, f)
                continue

            # Check if we've seen this marker before
            if fid not in self.camera_marker_frame_names[camera].keys():
                self.camera_marker_frame_names[camera][fid] = {
                    'name': f,
                    'count': 1
                }
                print "New marker in calibration:", camera, fid, f
            else:
                self.camera_marker_frame_names[camera][fid]['count'] += 1

        # Check for overlapping frames
        marker_1_names = self.camera_marker_frame_names[0].keys()
        marker_2_names = self.camera_marker_frame_names[1].keys()
        overlapping_markers = list(
            set(marker_1_names).intersection(marker_2_names))
        n_overlapping_markers = len(overlapping_markers)
        print "---1---", self.camera_marker_frame_names[0]
        print "---2---", self.camera_marker_frame_names[1]

        # Check if camera 1 sees any fiducials
        if len(self.camera_marker_frame_names[0]) > 0:
            # if the other camera has seen a fiducial then make sure we're seeing the same one
            if all(self.cameras_calibrated):
                if n_overlapping_markers > 0:
                    self._widget.shoulder_status_label.setText(
                        'FOUND FIDUCIAL')
                    self._widget.shoulder_status_label.setStyleSheet(
                        'color:#ffffff;background-color:#9AD111')
                    self.cameras_calibrated[0] = True
                else:
                    self._widget.shoulder_status_label.setText(
                        "FIDUCIALS DON'T MATCH")
            # Otherwise this is the only marker seen, so report it as found
            else:
                self._widget.shoulder_status_label.setText('FOUND FIDUCIAL')
                self._widget.shoulder_status_label.setStyleSheet(
                    'color:#ffffff;background-color:#9AD111')
                self.cameras_calibrated[0] = True

        # Check if camera 2 sees any fiducials
        if len(self.camera_marker_frame_names[1]) > 0:
            # if the other camera has seen a fiducial then make sure we're seeing the same one
            if all(self.cameras_calibrated):
                n_overlapping_markers = len(
                    set(marker_1_names).intersection(marker_2_names))
                if n_overlapping_markers > 0:
                    self._widget.wrist_status_label.setText('FOUND FIDUCIAL')
                    self._widget.wrist_status_label.setStyleSheet(
                        'color:#ffffff;background-color:#9AD111')
                    self.cameras_calibrated[1] = True
                else:
                    self._widget.wrist_status_label.setText(
                        "FIDUCIALS DON'T MATCH")
            # Otherwise this is the only marker seen, so report it as found
            else:
                self._widget.wrist_status_label.setText('FOUND FIDUCIAL')
                self._widget.wrist_status_label.setStyleSheet(
                    'color:#ffffff;background-color:#9AD111')
                self.cameras_calibrated[1] = True

        # Check if all cameras are calibrated
        if self.cameras_calibrated[0] and self.cameras_calibrated[1]:
            # First verify that the camera links (base to optical frames) are valid
            try:
                ret = self.tf_listen.lookupTransform(
                    "/base_link", "camera_1_rgb_optical_frame", rospy.Time(0))
                ret = self.tf_listen.lookupTransform(
                    "/base_link", "camera_2_rgb_optical_frame", rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                txt = "ROBOT NOT CONNECTED"
                self._widget.robot_calibration_status_label.setText(txt)
                return

            # Check that each camera has seen the same fiducial
            if n_overlapping_markers == 0:
                self._widget.robot_calibration_status_label.setText(
                    "FIDUCIALS DON'T MATCH")
                return

            # Get transform from robot base to camera 2
            """
            Want: World -> Cam2
            Have:
                Cam1->Marker1
                Cam2->Marker2
                Robot->Cam1
            Robot->Cam2 = (Robot->Cam1) * (Cam1->Marker1) * (Cam2->Marker2)^-1
            """

            # Find one of the markers seen by both cameras
            # print overlapping_markers, self.camera_marker_frame_names[0].keys(), self.camera_marker_frame_names[1].keys()
            marker_1_counts = [
                self.camera_marker_frame_names[0][x]['count']
                for x in overlapping_markers
            ]
            marker_2_counts = [
                self.camera_marker_frame_names[1][x]['count']
                for x in overlapping_markers
            ]
            overlap_count = [(marker_1_counts[i] + marker_2_counts[i]) / 2.
                             for i in range(n_overlapping_markers)]
            ar_idx = overlapping_markers[np.argmax(overlap_count)]

            marker_1 = self.camera_marker_frame_names[0][ar_idx]['name']
            marker_2 = self.camera_marker_frame_names[1][ar_idx]['name']
            # print "Overlapping name:", marker_1, marker_2

            base_frame = '/base_link'
            try:
                T_base_camera_1 = tf_c.fromTf(
                    self.tf_listen.lookupTransform(base_frame,
                                                   '/camera_1_link',
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                rospy.logwarn("Can't get TF robot->camera1")
                return

            try:
                T_camera_1_marker_1 = tf_c.fromTf(
                    self.tf_listen.lookupTransform('/camera_1_link', marker_1,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                rospy.logwarn("Can't get TF camera1->marker")
                return
            try:
                T_camera_2_marker_2 = tf_c.fromTf(
                    self.tf_listen.lookupTransform('/camera_2_link', marker_2,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                rospy.logwarn("Can't get TF camera2->marker")
                return

            T_base_camera_2 = (T_base_camera_1 * T_camera_1_marker_1 *
                               T_camera_2_marker_2.Inverse())

            # Extract position and rotation for the new transformation
            xyz = tf_c.toTf(T_base_camera_2)[0]
            rpy = T_base_camera_2.M.GetRPY()

            # Smooth the output frame to prevent large jumps due to noisy marker normals
            self.all_xyz += [xyz]
            self.all_rpy += [rpy]

            if len(self.all_xyz) > 2:
                xyz = np.mean(self.all_xyz, 0)
                rpy = np.mean(self.all_rpy, 0)
                # xyz = np.median(self.all_xyz, 0)
                # rpy = np.median(self.all_rpy, 0)
            else:
                return

            # Send the transform to the semi-static transformer
            # First check to see if service exists
            try:
                rospy.wait_for_service(
                    '/semi_static_transform_publisher/UpdateTransform')
            except rospy.ROSException as e:
                rospy.logerr('Could not find semi-static transform service')
                return
            # Make servo call to set pose
            try:
                # Set up the transform to send to the semi-static node
                update_transform_proxy = rospy.ServiceProxy(
                    '/semi_static_transform_publisher/UpdateTransform',
                    UpdateTransform)
                msg = UpdateTransformRequest()
                msg.x, msg.y, msg.z = xyz
                msg.roll, msg.pitch, msg.yaw = rpy

                # Call to service
                update_transform_proxy(msg)
            except rospy.ServiceException as e:
                rospy.logwarn('There was a problem with the service:')
                rospy.logwarn(e)

            # Update GUI
            self._widget.robot_calibration_status_label.setText(
                'ROBOT CALIBRATED')
            self._widget.robot_calibration_status_label.setStyleSheet(
                'color:#ffffff;background-color:#9AD111')
            self._widget.done_btn.setStyleSheet('color:#429611')
            self.calibrated = True
Example #11
def Map2localization(trans):
    global estimated_tf, init, localize, offset_hist, map2odom
    offset = TransformStamped()
    # We want to transform the detected aruco pose to map frame to do a reasonable comparison
    qinv = quaternion_from_euler(0, 0, 0)
    qdetected = quaternion_from_euler(0, 0, 0)
    if init:
        # Initialize the Kalman filter once, on the first callback.
        estimated_tf = cv2.KalmanFilter(6, 6)  # 6 state dims, 6 measurement dims
        trans_mat = np.identity(6, np.float32)
        estimated_tf.transitionMatrix = trans_mat  # x' = A x + B u; A is the transition matrix
        estimated_tf.measurementMatrix = trans_mat
        estimated_tf.processNoiseCov = cv2.setIdentity(
            estimated_tf.processNoiseCov, 1e-3)
        estimated_tf.measurementNoiseCov = cv2.setIdentity(
            estimated_tf.measurementNoiseCov, 1e-2)
        estimated_tf.errorCovPost = cv2.setIdentity(estimated_tf.errorCovPost)
        init = False

    if not buff.can_transform(trans.child_frame_id, 'cf1/odom',
                              trans.header.stamp, rospy.Duration(0.5)):
        rospy.logwarn_throttle(
            5.0, 'No transform from %s to cf1/odom' % trans.child_frame_id)
        return
    else:
        t = TransformStamped()
        try:
            # We want to keep the relative position; we can calculate the error between these frames.
            t = buff.lookup_transform('cf1/odom', trans.child_frame_id,
                                      rospy.Time(0), rospy.Duration(0.5))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rospy.logwarn_throttle(
                5.0, 'No transform from %s to odom' % trans.child_frame_id)

            return

        #t.transform.translation.z=0.35
        #==============================================================================
        #Ludvig's solution
        #==============================================================================
        Fodom = tf_conversions.fromTf(
            ((t.transform.translation.x, t.transform.translation.y,
              t.transform.translation.z),
             (t.transform.rotation.x, t.transform.rotation.y,
              t.transform.rotation.z, t.transform.rotation.w)))
        Fmap = tf_conversions.fromMsg(json_markers[int(
            trans.child_frame_id[-1])])

        Fdiff = Fmap * Fodom.Inverse()

        offset = TransformStamped()
        offset.header.stamp = rospy.Time.now()
        offset.header.frame_id = 'map'
        offset.child_frame_id = 'cf1/odom'
        ((offset.transform.translation.x, offset.transform.translation.y,
          offset.transform.translation.z),
         (offset.transform.rotation.x, offset.transform.rotation.y,
          offset.transform.rotation.z,
          offset.transform.rotation.w)) = tf_conversions.toTf(Fdiff)
        #======================================
        #======================================
        #=====================================
        #offset.transform.translation.z = 0 # Not considering any changes in z-axis
        #print("before filter " + str(offset.transform.translation.x) + " " + str(offset.transform.translation.y) + " " + str(offset.transform.translation.z))
        """
        print("")
        offset.transform.translation.x = butter_lowpass_filter(offset.transform.translation.x, cutoff, fs, order)
        offset.transform.translation.y = butter_lowpass_filter(offset.transform.translation.y, cutoff, fs, order)
        offset.transform.translation.z = butter_lowpass_filter(offset.transform.translation.z, cutoff, fs, order)
        #offset.transform.rotation.x = butter_lowpass_filter(offset.transform.rotation.x, cutoff, fs, order)
        #offset.transform.rotation.y = butter_lowpass_filter(offset.transform.rotation.y, cutoff, fs, order)
        #offset.transform.rotation.z = butter_lowpass_filter(offset.transform.rotation.z, cutoff, fs, order)
        #offset.transform.rotation.w = butter_lowpass_filter(offset.transform.rotation.w, cutoff, fs, order)
        """
        #print("after filter " + str(offset.transform.translation.x) + " " + str(offset.transform.translation.y) + " " + str(offset.transform.translation.z))

        #=====================================
        #offset_hist.append((offset.transform.translation.x,offset.transform.translation.y,offset.transform.translation.z))
        #offset_hist.append(offset.transform.translation.z)

        # pp.pprint(offset_hist)
        # print('list\n')
        #print(offset_hist)
        # if len(offset_hist)>1:
        #     offset_hist=offset_hist[-1:]
        #     #print(offset_hist)
        #     average = np.mean(offset_hist, axis=0)
        #     #print(average)
        #     offset.transform.translation.x =average[0]
        #     offset.transform.translation.y =average[1]
        #     offset.transform.translation.z =average[2]
        #     #offset.transform.translation.y = average[1]
        # #     print('hi!')
        # #     Fdiff=np.average(offset_hist)
        # #     print('list 2\n')
        # #     pp.pprint(Fdiff)
        # #     #sum(offset_hist)/float(len(offset_hist))
        #     del offset_hist[:]
        #     offset.transform.translation.z = 0
        # #     #=====================================

        #     tf_bc.sendTransform(offset)
        #     map2odom=offset
        #     localize.data=True
        #     pub_tf.publish(localize)
        #     #=====================================

        #offset.transform.translation.z = 0 # Not considering any changes in z-axis

        filtered_offset = copy.deepcopy(offset)

        # Update the Kalman filter: the measurement is the 2-D translation
        # (x, y) followed by the orientation quaternion (x, y, z, w).
        rot_quat = np.array([
            filtered_offset.transform.rotation.x,
            filtered_offset.transform.rotation.y,
            filtered_offset.transform.rotation.z,
            filtered_offset.transform.rotation.w
        ], np.float32).reshape(-1, 1)
        translation = np.array([
            filtered_offset.transform.translation.x,
            filtered_offset.transform.translation.y
        ], np.float32).reshape(-1, 1)

        measurements = np.concatenate((translation, rot_quat))
        prediction = estimated_tf.predict()
        estimation = estimated_tf.correct(measurements.reshape(-1, 1))

        offset.transform.translation.x = estimation[0]
        offset.transform.translation.y = estimation[1]
        offset.transform.translation.z = 0
        offset.transform.rotation.x = estimation[2]
        offset.transform.rotation.y = estimation[3]
        offset.transform.rotation.z = estimation[4]
        offset.transform.rotation.w = estimation[5]
        n = np.linalg.norm([
            offset.transform.rotation.x, offset.transform.rotation.y,
            offset.transform.rotation.z, offset.transform.rotation.w
        ])
        # Normalize the quaternion
        offset.transform.rotation.x /= n
        offset.transform.rotation.y /= n
        offset.transform.rotation.z /= n
        offset.transform.rotation.w /= n
        #tf_bc.sendTransform(offset)
        map2odom = offset
        localize.data = True
        pub_tf.publish(localize)
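Map2localization is written as a subscriber callback taking a TransformStamped (for example from an ArUco detection node). A hedged wiring sketch for the module-level objects it relies on; the topic names are placeholders, and loading json_markers (plus the cv2/numpy/tf_conversions imports) is elided:

import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from std_msgs.msg import Bool

rospy.init_node('map_to_odom_localization')
buff = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(buff)
tf_bc = tf2_ros.TransformBroadcaster()
pub_tf = rospy.Publisher('localization/is_localized', Bool, queue_size=1)
localize = Bool()
init = True

rospy.Subscriber('aruco/detected_transform', TransformStamped, Map2localization)
rospy.spin()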