def estimate_rot(data_num=1):
    #your code goes here
    imu = io.loadmat(
        os.path.join(os.path.dirname(__file__),
                     "imu/imuRaw" + str(data_num) + ".mat"))
    #imu = io.loadmat('imu/imuRaw'+str(data_num)+'.mat')
    accel = imu['vals'][0:3, :]
    gyro = imu['vals'][3:6, :]
    T = np.shape(imu['ts'])[1]
    time = imu['ts']

    w_z, w_x, w_y, a_x, a_y, a_z = calibration(imu, accel, gyro, T)
    angular_velocities = np.array([w_x, w_y, w_z])
    accelerations = np.array([a_x, a_y, a_z])
    dt = np.diff(time)
    iters = accelerations.shape[1] - 1
    roll, pitch, yaw = [], [], []
    #print(angular_velocities[:, 0])
    ukf = UKF(angular_velocities, accelerations, dt)
    roll, pitch, yaw = ukf.main(ukf, iters)
    """
	for i in range(iters):  
		print("At Iteration: {}".format(i ))
		omega = angular_velocities[:, i].reshape(3, 1)
		accel = accelerations[:, i].reshape(3, 1) 	
		ukf.forward(i)   
		mu_q = Quaternion()
		mu_q.q = ukf.mu
		euler_angles = mu_q.euler_angles() 
		roll.append(euler_angles[0])
		pitch.append(euler_angles[1])
		yaw.append(euler_angles[2])
	"""
    roll, pitch, yaw = np.array(roll), np.array(pitch), np.array(yaw)
    return roll, pitch, yaw
Example #2
    def setUp(self):

        def fx(x, T): return x

        def hx(x): return x

        x0 = np.array([0, 0])
        P0 = np.array([[1, 0.5], [0.5, 1]])
        Q = np.eye(2)
        R = np.eye(2)

        self.ukf = UKF(fx, hx, Q, R, x0, P0)
Example #3
class TestUKF(unittest.TestCase):
    def setUp(self):

        def fx(x, T): return x

        def hx(x): return x

        x0 = np.array([0, 0])
        P0 = np.array([[1, 0.5], [0.5, 1]])
        Q = np.eye(2)
        R = np.eye(2)

        self.ukf = UKF(fx, hx, Q, R, x0, P0)

    def test_predict(self):
        self.ukf.predict(T=1)
        (x, P) = self.ukf.get_state()
        x = np.round(x, decimals=5)
        P = np.round(P, decimals=5)

        self.assertTrue((x == np.array([0, 0])).all())
        self.assertTrue((P == np.array([[2, 0.5], [0.5, 2]])).all())

    def test_update(self):
        self.ukf.predict(T=1)
        self.ukf.update(np.array([1, 1]))

        (x, P) = self.ukf.get_state()
        x = np.round(x, decimals=5)
        P = np.round(P, decimals=5)

        self.assertTrue((x == np.array([0.71429,  0.71429])).all())
        self.assertTrue((P == np.array([[0.65714,  0.05714], [0.05714, 0.65714]])).all())
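Because fx and hx in this fixture are identity maps, the unscented transform should reduce exactly to the linear Kalman filter, so the values asserted above can be checked by hand. A minimal sketch (standalone NumPy, assuming the same Q, R, x0, P0 as in setUp):

import numpy as np

P0 = np.array([[1.0, 0.5], [0.5, 1.0]])
Q = R = np.eye(2)
x0 = np.array([0.0, 0.0])
z = np.array([1.0, 1.0])

P_pred = P0 + Q                          # predict: state unchanged, covariance grows by Q
K = P_pred @ np.linalg.inv(P_pred + R)   # Kalman gain
x_upd = x0 + K @ (z - x0)
P_upd = P_pred - K @ (P_pred + R) @ K.T

print(np.round(x_upd, 5))   # [0.71429 0.71429]
print(np.round(P_upd, 5))   # [[0.65714 0.05714]
                            #  [0.05714 0.65714]]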
Example #4
def estimate_rot(data_num=1):
    vals_data = []
    ts_data = []
    filename = os.path.join(os.path.dirname(__file__), "imu/imuRaw" + str(data_num) + ".mat")
    imuRaw = io.loadmat(filename)
    trim_imu = {k:imuRaw[k] for k in ['vals', 'ts']}
    trim_imu['vals'] = np.rollaxis(trim_imu['vals'], 1)
    trim_imu['ts'] =  np.rollaxis(trim_imu['ts'], 1)
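    # the raw 'vals' columns after rollaxis are [ax, ay, az, wz, wx, wy] (cf. the
    # calibration order in Example #1); the two swaps below reorder the gyro
    # channels to [wx, wy, wz]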
    trim_imu['vals'][:, [3, 5]] = trim_imu['vals'][:, [5, 3]]
    trim_imu['vals'][:, [3, 4]] = trim_imu['vals'][:, [4, 3]]
    trim_imu['vals'] = trim_imu['vals'].astype(float)
    vals_data.append(trim_imu['vals'])
    ts_data.append(trim_imu['ts'])
    
    imu_data_new = []
    for data in vals_data:
        cleaned_data = clean_data_imu(data)
        imu_data_new.append(cleaned_data)
    
    imu_data_new[0][:,0] = -imu_data_new[0][:,0]
    imu_data_new[0][:,1] = -imu_data_new[0][:,1]
    delta_ts = []
    ts = np.diff(ts_data[0], axis = 0)
    delta_ts = np.concatenate([np.array([0]).reshape(1,1), ts])
    
    ###UKF
    roll, pitch, yaw =  UKF(imu_data_new[0], delta_ts)
    return roll,pitch,yaw
Example #5
def create_filter(x, P):
    def fx(x, T):
        # x.shape = (N, k, 1)
        # x = [xp, yp, v, phi, dphi]
        xp = x[:, 0, :] + T * x[:, 2, :] * np.cos(x[:, 3, :])
        yp = x[:, 1, :] + T * x[:, 2, :] * np.sin(x[:, 3, :])
        v = x[:, 2, :]

        phi = x[:, 3, :] + T * x[:, 4, :]
        dphi = x[:, 4, :]

        x = np.concatenate((xp, yp, v, phi, dphi), axis=1)
        return x[..., None]

    q_v = 1**2
    q_dphi = 1**2
    gamma = np.array([[0, 0, 1, 0, 0], [0, 0, 0, 0, 1]])
    Q = gamma.transpose() @ np.diag([q_v, q_dphi]) @ gamma

    def hx(x):
        # x.shape = (N, k, 1)
        return x[:, 0:-1, :]

    r_x = 3**2
    r_y = 3**2
    r_v = 3**2
    r_phi = 2**2
    R = np.diag([r_x, r_y, r_v, r_phi])

    ukf = UKF(fx, hx, Q, R, x, P)
    return ukf
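create_filter only builds the filter; a minimal driving loop is sketched below, assuming the same predict(T=...) / update(z) / get_state() interface exercised by the unit tests above. The initial state, the dt value, and the dummy measurement are illustrative assumptions, not part of the original example.

import numpy as np

x0 = np.zeros((5, 1))                    # [xp, yp, v, phi, dphi]
P0 = np.eye(5)
ukf = create_filter(x0, P0)

dt = 0.1
zs = [np.array([1.0, 0.0, 10.0, 0.0])]   # dummy [x, y, v, phi] measurements
for z in zs:
    ukf.predict(T=dt)                    # propagate the constant-turn-rate model in fx
    ukf.update(z)                        # fuse the position/velocity/heading measurement
    x_est, P_est = ukf.get_state()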
Example #6
    def solve_ukf(self):
        def fx(x, dt):

            # x[4], x[5], x[6] = x[4], normalize_angle(x[5]), normalize_angle(x[6])
            sol = self.ode2(x)
            # sol = self.ode_int(x)
            return np.array(sol)

        def hx(x):
            return np.array([x[3], x[2], normalize_angle(x[4])])

        # points = MerweScaledSigmaPoints(n=7, alpha=.00001, beta=2., kappa=-4.)
        points = JulierSigmaPoints(n=7, kappa=-4., sqrt_method=None)
        # points = SimplexSigmaPoints(n=7)
        kf = UKF(dim_x=7,
                 dim_z=3,
                 dt=self.dt,
                 fx=fx,
                 hx=hx,
                 points=points,
                 sqrt_fn=None,
                 x_mean_fn=self.state_mean,
                 z_mean_fn=self.meas_mean,
                 residual_x=self.residual_x,
                 residual_z=self.residual_z)

        x0 = np.array(self.ini_val)
        # x0 = np.reshape(x0, (1,7))

        kf.x = x0  # initial mean state
        kf.Q *= np.diag([.0001, .0001, .0001, .0001, .0001, .01, .01])
        kf.P *= 0.000001  # kf.P = eye(dim_x) ; adjust covariances if necessary
        kf.R *= 0.0001

        Ms, Ps = kf.batch_filter(self.zs)
        Ms[:, 5] = self.al_to_th(Ms[:, 5])
        Ms[:, 6] = self.al_to_th(Ms[:, 6])

        return Ms
Example #7
    def __init__(self, prediction, trackIdCount):
        """Initialize variables used by Track class
        Args:
            prediction: predicted centroids of object to be tracked
            trackIdCount: identification of each track object
        Return:
            None
        """
        self.track_id = trackIdCount  # identification of each track object
        self.KF = UKF(
            prediction)  #,self.iterate_x)  # KF instance to track this object
        self.prediction = np.asarray(prediction)  # predicted centroids (x,y)
        self.skipped_frames = 0  # number of frames skipped undetected
        self.trace = []  # trace path
Example #8
    def move_wheelchair(self):

        # draw the initial caster wheel orientations uniformly in [-pi, pi)
        self.ini_cwo_l = 2 * np.pi * np.random.random_sample() - np.pi
        self.ini_cwo_r = 2 * np.pi * np.random.random_sample() - np.pi

        while rospy.get_time() == 0.0:
            continue

        rospy.sleep(1)

        # self.ini_val = [self.wheel_cmd.angular.z, -self.wheel_cmd.linear.x, -self.odom_y, self.odom_x, self.odom_th, self.th_to_al(self.l_caster_angle), self.th_to_al(self.r_caster_angle)]
        self.ini_val = [
            0.0, 0.0, 0.0, 0.0, 0.0, self.ini_cwo_l, self.ini_cwo_r
        ]

        # self.ini_val = np.random.uniform(low=-1.0, high=1.0, size=(7,)).tolist()

        # UKF initialization
        def fx(x, dt):

            sol = self.ode2(x)
            return np.array(sol)

        def hx(x):
            return np.array([x[3], x[2], normalize_angle(x[4])])

        # points = MerweScaledSigmaPoints(n=7, alpha=.00001, beta=2., kappa=-4.)
        points = JulierSigmaPoints(n=7, kappa=-4., sqrt_method=None)
        # points = SimplexSigmaPoints(n=7)
        kf = UKF(dim_x=7,
                 dim_z=3,
                 dt=self.dt,
                 fx=fx,
                 hx=hx,
                 points=points,
                 sqrt_fn=None,
                 x_mean_fn=self.state_mean,
                 z_mean_fn=self.meas_mean,
                 residual_x=self.residual_x,
                 residual_z=self.residual_z)

        x0 = np.array(self.ini_val)

        kf.x = x0  # initial mean state
        kf.Q *= np.diag([.0001, .0001, .0001, .0001, .0001, .01, .01])
        kf.P *= 0.000001  # kf.P = eye(dim_x) ; adjust covariances if necessary
        kf.R *= 0.0001

        count = 0
        rospy.loginfo("Moving robot...")
        start = rospy.get_time()
        self.r.sleep()

        while (rospy.get_time() - start <
               self.move_time) and not rospy.is_shutdown():

            self.pub_twist.publish(self.wheel_cmd)

            z = np.array([self.odom_x, -self.odom_y, self.odom_th])
            self.zs.append(z)

            kf.predict()
            kf.update(z)

            self.xs.append(kf.x)

            self.pose_x_data.append(self.odom_x)
            self.pose_y_data.append(self.odom_y)
            self.pose_th_data.append(self.odom_th)
            self.l_caster_data.append(self.l_caster_angle)
            self.r_caster_data.append(self.r_caster_angle)

            count += 1
            self.r.sleep()

        # Stop the robot
        self.pub_twist.publish(Twist())
        self.xs = np.array(self.xs)
        rospy.sleep(1)
Example #9
    return z

if __name__ == "__main__":
    read_file = True
    if read_file:
        t, v, w = readFile()
        vc, wc = generateVelocities(t)
    else:
        t = np.arange(0, params.tf, params.dt)
        vc, wc = generateVelocities(t)
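        # corrupt the commanded velocities with velocity-motion-model noise:
        # zero-mean Gaussian with variance alpha1*vc^2 + alpha2*wc^2 for v
        # (alpha3/alpha4 for w)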
        v = vc + np.sqrt(params.alpha1 * vc**2 + params.alpha2 * wc**2) * np.random.randn(vc.size)
        w = wc + np.sqrt(params.alpha3 * vc**2 + params.alpha4 * wc**2) * np.random.randn(wc.size)

    Car = CarAnimation()
    ukf = UKF(params.dt)

    x_hist = []
    mu_hist = []
    err_hist = []
    x_covar_hist = []
    y_covar_hist = []
    psi_covar_hist = []
    K_hist = []

    x0 = params.x0
    y0 = params.y0
    phi0 = params.theta0
    state = np.array([x0, y0, phi0])
    dead_reckon = np.array([x0, y0, phi0])
    mu = np.array([x0, y0, phi0])
Example #10
rnn100 = load_model('models/rnn-units100-simungm-{0}-{1}.h5'.format(
    training_size, T))

all_y = []
all_x = []

ukf_mses = []
ekf_mses = []
lstm_mses = []
lstm_stacked_mses = []
bar = ProgressBar()
for s in bar(range(num_sims)):
    #x_0 = np.random.normal(0, x_var, 1)
    x_0 = np.array([0])
    sim = UNGM(x_0, R, Q, x_var)
    ukf = UKF(sim.f, sim.F, sim.h, sim.H, sim.Q, sim.R, 5., first_x_0, 1)
    ekf = EKF(sim.f, sim.F, sim.h, sim.H, sim.Q, sim.R, first_x_0, 1)
    for t in range(T):
        x, y = sim.process_next()
        ukf.predict()
        ukf.update(y)
        ekf.predict()
        ekf.update(y)
    ukf_mses.append(MSE(ukf.get_all_predictions(), sim.get_all_x()))
    ekf_mses.append(MSE(ekf.get_all_predictions(), sim.get_all_x()))

    all_x.append(np.array(sim.get_all_x()))
    all_y.append(np.array(sim.get_all_y()))

X = np.array(all_y)[:, :-1, :]
y = np.array(all_x)[:, 1:, :]
Example #11
def main():
    np.set_printoptions(precision=3)

    # Process Noise
    q = np.eye(6)
    q[0][0] = 0.0001
    q[1][1] = 0.0001
    q[2][2] = 0.0004
    q[3][3] = 0.0025
    q[4][4] = 0.0025
    q[5][5] = 0.0025
    realx = [0]
    realy = [0]
    estimatex = [0]
    estimatey = [0]
    t = [0]
    # create measurement noise covariance matrices
    r_imu = np.zeros([2, 2])
    r_imu[0][0] = 0.01
    r_imu[1][1] = 0.03

    r_compass = np.zeros([1, 1])
    r_compass[0][0] = 0.02

    r_encoder = np.zeros([1, 1])
    r_encoder[0][0] = 0.001

    # pass all the parameters into the UKF!
    # number of state variables, process noise, initial state, initial covariance, three tuning parameters, and the iterate function
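    # in this call: 6 states, Q = q, x0 = zeros(6), P0 = 1e-4 * I, and the three tuning
    # values 0.04, 0.0, 2.0 (presumably the sigma-point alpha, kappa, beta), plus iterate_x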
    state_estimator = UKF(6, q, np.zeros(6), 0.0001 * np.eye(6), 0.04, 0.0,
                          2.0, iterate_x)

    with open('example.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)

        last_time = 0
        # read data
        for row in reader:
            row = [float(x) for x in row]

            cur_time = row[0]
            d_time = cur_time - last_time
            real_state = np.array([row[i] for i in [5, 6, 4, 3, 2, 1]])

            # create an array for the data from each sensor
            compass_hdg = row[9]
            compass_data = np.array([compass_hdg])

            encoder_vel = row[10]
            encoder_data = np.array([encoder_vel])

            imu_accel = row[7]
            imu_yaw_rate = row[8]
            imu_data = np.array([imu_yaw_rate, imu_accel])

            last_time = cur_time

            # prediction is pretty simple
            state_estimator.predict(d_time)

            # updating isn't bad either
            # remember that the updated states should be zero-indexed
            # the states should also be in the order of the noise and data matrices
            state_estimator.update([4, 5], imu_data, r_imu)
            state_estimator.update([2], compass_data, r_compass)
            state_estimator.update([3], encoder_data, r_encoder)

            print("--------------------------------------------------------")
            print("Real state: ", real_state)
            print("Estimated state: ", state_estimator.get_state())
            print("Difference: ", real_state - state_estimator.get_state())
            realx.append(real_state[3])
            realy.append(real_state[1])
            estimatex.append(state_estimator.get_state(3))
            estimatey.append(state_estimator.get_state(1))
            t.append(d_time)

    plt.plot(estimatex, "-b")
    plt.plot(realx, "-r")
Example #12
import numpy as np
import matplotlib.pyplot as plt
import car_params as params
import scipy.io as sio
from ukf import UKF
from ukf import unwrap
from extractdata import *

if __name__ == "__main__":
    ukf = UKF()

    x_hist = truth_data['x_truth']
    y_hist = truth_data['y_truth']
    theta_hist = truth_data['th_truth']
    t_truth = truth_data['t_truth']
    mu_hist = []
    err_hist = []
    covar_hist = []

    x0 = params.x0
    y0 = params.y0
    phi0 = params.theta0
    mu = np.array([x0, y0, phi0])
    Sigma = np.eye(3)

    t_prev = 0.0
    meas_index = 0
    for i in range(odom_t.size):
        #stuff for plotting
        mu_hist.append(mu)
        dt = odom_t.item(i) - t_prev
Example #13
                           [0, 0, 1.0000]])  # MATLAB's CAMERA
        self.tf_cam_ego = inv_tf(
            np.array([[0., 0., 1., 0.05], [-1., 0., 0., 0.],
                      [0., -1., 0., 0.07], [0., 0., 0., 1.]]))

    def pnt3d_to_pix(self, pnt_c):
        """ input: assumes pnt in camera frame output: [row, col] i.e. the projection of xyz onto camera plane """
        rc = self.K @ np.reshape(pnt_c[0:3], (3, 1))
        return np.array([rc[1], rc[0]]) / rc[2]


if __name__ == '__main__':
    np.set_printoptions(
        linewidth=100,
        suppress=True)  # format numpy so printing matrices is more clear
    ukf = UKF()
    ukf.mu = np.array([
        -0.88312477, -0.15021993, 0.83246046, 0., 0., 0., 0.99925363,
        -0.00460481, 0.00205432, 0.03829992, 0., 0, 0.
    ])
    ukf.camera = camera()
    ukf.bb_3d = np.array([[0.135, 0.135, 0.065, 1.],
                          [0.135, 0.135, -0.065, 1.],
                          [0.135, -0.135, -0.065, 1.],
                          [0.135, -0.135, 0.065, 1.],
                          [-0.135, -0.135, 0.065, 1.],
                          [-0.135, -0.135, -0.065, 1.],
                          [-0.135, 0.135, -0.065, 1.],
                          [-0.135, 0.135, 0.065, 1.]])

    tf_ego_w = np.array([[0.99903845, 0.01473524, 0.04129215, 3.6821852],
Example #14
def run_execution_loop():
    b_use_gt_bb = rospy.get_param('~b_use_gt_bb')
    b_use_gt_pose_init = rospy.get_param('~b_use_gt_pose_init')
    b_use_gt_detect_bb = rospy.get_param('~b_use_gt_detect_bb')
    b_use_track_checks = rospy.get_param('~b_use_track_checks')
    b_verbose = rospy.get_param('~b_verbose')
    detection_period_ros = rospy.get_param('~detection_period')  # In seconds
    objects_sizes_yaml = rospy.get_param('~object_sizes_file')
    objects_used_path = rospy.get_param('~object_used_file')
    classes_names_file = rospy.get_param('~classes_names_file')
    b_pub_3d_bb_proj = rospy.get_param('~b_pub_3d_bb_proj')
    detector_weights = rospy.get_param('~detector_weights')
    detector_cfg = rospy.get_param('~detector_cfg')
    b_filter_meas = True

    ros = ROS(b_use_gt_bb,
              b_verbose,
              b_use_gt_pose_init,
              b_use_gt_detect_bb,
              b_pub_3d_bb_proj,
              b_publish_gt_3d_projections=(
                  False and b_pub_3d_bb_proj))  # create a ros interface object

    # Returns dict of params per class name
    category_params = load_category_params()

    bb_3d, obj_width, obj_height, classes_names, classes_ids, objects_names_per_class, connected_inds = \
        get_object_sizes_from_yaml(objects_sizes_yaml, objects_used_path, classes_names_file, category_params)  # Parse objects used and associated configurations

    ros.objects_names_per_class = objects_names_per_class
    ros.bb_3d = bb_3d

    if b_use_gt_bb:
        rospy.logwarn(
            "\n\n\n------------- IN DEBUG MODE (Using Ground Truth Bounding Boxes) -------------\n\n\n"
        )
        time.sleep(0.5)

    if not b_use_gt_bb:
        print('Waiting for first image')
        im = ros.get_first_image()
        print('initializing image segmentor!!!!!!')
        detector_name = 'edge_tpu_mobile_det'  #  detector_name='edge_tpu_mobile_det'  |  yolov3 (default)
        ros.im_seg = ImageSegmentor(im,
                                    detector_name=detector_name,
                                    use_trt=rospy.get_param('~b_use_tensorrt'),
                                    detection_period=detection_period_ros,
                                    verbose=b_verbose,
                                    detect_classes_ids=classes_ids,
                                    detect_classes_names=classes_names,
                                    use_track_checks=b_use_track_checks,
                                    use_gt_detect_bb=b_use_gt_detect_bb,
                                    detector_weights=detector_weights)
        print('initializing DONE - PLAY BAG NOW!!!!!!')
        time.sleep(0.5)

    rate = rospy.Rate(30)  # max filter rate
    ukf_dict = {}  # key: object_id value: ukf object
    ros.camera = camera(ros)
    my_camera = ros.camera
    loop_time_hist = []
    fe_time_hist = []
    be_time_hist = []
    loop_ave_info = [0, 0]  # [running mean, num els]
    fe_ave_info = [0, 0]  # [running mean, num els]
    be_ave_info = [0, 0]  # [running mean, num els]

    ros.create_subs_and_pubs()
    dim_state = 13
    state_est = np.zeros((dim_state + dim_state**2, ))
    loop_count = 0
    previous_image_time = 0

    if b_use_gt_bb:
        init_state_from_gt(ros, ukf_dict['quad4'])
        img_seg = None

    tic = time.time()
    while not rospy.is_shutdown():
        # store data locally (so it doesnt get overwritten in ROS object)
        loop_time = ros.latest_img_time
        if loop_time <= previous_image_time or ros.latest_img_time < 0:
            # this means we dont have new data yet
            rate.sleep()
            continue
        if loop_count == 0:
            # first iteration, need initial time so dt will be accurate
            previous_image_time = loop_time
            loop_count += 1
            rate.sleep()
            continue

        t_be_start = time.time()  # start timer for backend
        new_fe_time = ros.front_end_time
        fe_time_hist.append(new_fe_time)
        loop_time_hist.append(loop_time - previous_image_time)
        previous_image_time = loop_time  # this ensures we dont reuse the image

        # get latest data from ros
        processed_image = ros.im_process_output
        im_seg_mode = ros.latest_bb_method

        # do we have any objects?
        num_obj_in_img = len(processed_image)
        if num_obj_in_img == 0:  # if no objects are seen, dont do anything
            print("No objects detected/tracked in FOV")
            rate.sleep()
            continue

        tf_w_ego = ros.tf_w_ego
        tf_w_ego_gt = ros.tf_w_ego_gt
        tf_ego_w = inv_tf(tf_w_ego)  # ego quad pose

        if b_use_gt_bb:
            raise RuntimeError("b_use_gt_bb option NOT YET IMPLEMENTED")

        # handle each object seen
        obj_ids_tracked = []
        for obj_id, (abb, class_str, valid) in processed_image.items():
            ukf = None
            if not obj_id in ukf_dict:  # New Object
                print("new object (id = {}, type = {})".format(
                    obj_id, class_str))
                ukf_dict[obj_id] = UKF(camera=my_camera,
                                       bb_3d=bb_3d[class_str],
                                       obj_width=obj_width[class_str],
                                       obj_height=obj_height[class_str],
                                       ukf_prms=category_params[class_str],
                                       init_time=loop_time,
                                       class_str=class_str,
                                       obj_id=obj_id,
                                       verbose=b_verbose)
                if b_use_gt_pose_init:
                    approx_position, _ = ukf_dict[obj_id].approx_pose_from_bb(
                        abb, tf_w_ego)
                    gt_pose = ros.get_closest_pose(class_str, approx_position)
                    ukf_dict[obj_id].reinit_filter_from_gt(gt_pose)
                    # Avoid reusing GT to initialize pose TODO Currently limits gt to first object
                    b_use_gt_pose_init = False
                else:
                    ukf_dict[obj_id].reinit_filter_approx(abb, tf_w_ego)
                continue

            obj_ids_tracked.append(obj_id)

            if ukf_dict[obj_id] is not None:
                ukf_dict[obj_id].step_ukf(abb, tf_ego_w,
                                          loop_time)  # update ukf
                if b_pub_3d_bb_proj:
                    tf_w_ado = state_to_tf(ukf_dict[obj_id].mu)
                    if ros.b_publish_gt_3d_projections:  # concatenate the gt projection
                        tf_w_ado_gt_array = ros.get_closest_pose(
                            class_str, ukf_dict[obj_id].mu[0:3])
                        tf_w_ado_gt = np.eye(4)
                        tf_w_ado_gt[0:3, 3] = tf_w_ado_gt_array[0:3]
                        tf_w_ado_gt[0:3,
                                    0:3] = quat_to_rotm(tf_w_ado_gt_array[3:7])
                        ukf_dict[obj_id].projected_3d_bb = np.vstack(
                            (np.fliplr(
                                pose_to_3d_bb_proj(tf_w_ado, tf_w_ego,
                                                   ukf_dict[obj_id].bb_3d,
                                                   ukf_dict[obj_id].camera)),
                             np.fliplr(
                                 pose_to_3d_bb_proj(tf_w_ado_gt, tf_w_ego_gt,
                                                    ukf_dict[obj_id].bb_3d,
                                                    ukf_dict[obj_id].camera))))
                    else:
                        ukf_dict[obj_id].projected_3d_bb = np.fliplr(
                            pose_to_3d_bb_proj(tf_w_ado, tf_w_ego,
                                               ukf_dict[obj_id].bb_3d,
                                               ukf_dict[obj_id].camera))
                    if class_str in connected_inds:
                        ukf_dict[obj_id].connected_inds = connected_inds[
                            ukf_dict[obj_id].class_str]

        be_time_hist.append(time.time() - t_be_start)
        ros.publish_filter_state(obj_ids_tracked, ukf_dict)
        ros.publish_bb_msg(processed_image, im_seg_mode, loop_time)

        # update running averages
        loop_ave_info = update_running_average(loop_ave_info,
                                               loop_time_hist[-1])
        fe_ave_info = update_running_average(fe_ave_info, fe_time_hist[-1])
        be_ave_info = update_running_average(be_ave_info, be_time_hist[-1])

        if loop_count % 10 == 0:
            print(
                "loop itr {}:\n\tAve front end time = {}\n\tAve back end time = {}\n\tAve loop time - {}\n\t%% detects = {}"
                .format(
                    loop_count, fe_ave_info[0], be_ave_info[0],
                    loop_ave_info[0],
                    100 * ros.im_seg.num_detections / ros.num_imgs_processed))

        # Save current object states in image segmentor
        ros.im_seg.ukf_dict = ukf_dict

        # ros.im_seg_mode = ros.TRACK
        if b_verbose:
            print("FULL END-TO-END time = {:4f}\n".format(time.time() - tic))
        rate.sleep()
        tic = time.time()
        loop_count += 1
Example #15
def main():
    np.set_printoptions(precision=3)

    # Process Noise
    q = np.eye(6)
    q[0][0] = 0.0001
    q[1][1] = 0.0001
    q[2][2] = 0.0004
    q[3][3] = 0.0025
    q[4][4] = 0.0025
    q[5][5] = 0.0025
    
    # create measurement noise covariance matrices
    r_imu = np.zeros([2, 2])
    r_imu[0][0] = 0.01
    r_imu[1][1] = 0.03

    r_compass = np.zeros([1, 1])
    r_compass[0][0] = 0.02

    r_encoder = np.zeros([1, 1])
    r_encoder[0][0] = 0.001

    # pass all the parameters into the UKF!
    # number of state variables, process noise, initial state, initial covariance, three tuning parameters, and the iterate function
    state_estimator = UKF(6, q, np.zeros(6), 0.0001*np.eye(6), 0.04, 0.0, 2.0, iterate_x)
    i = 0
    last_time = 0  # keep the previous timestamp across rows (initialized outside the loop)
    csvfile = open('example.csv', 'r')
    for line in csvfile:
        row = line.split(',')

        # read data, skipping the header row
        if i != 0:
            cur_time = float(row[0])
            d_time = cur_time - last_time
            real_state = np.array([float(row[j]) for j in [5, 6, 4, 3, 2, 1]])

            # create an array for the data from each sensor
            compass_hdg = float(row[9])
            compass_data = np.array([compass_hdg])

            encoder_vel = float(row[10])
            encoder_data = np.array([encoder_vel])

            imu_accel = float(row[7])
            imu_yaw_rate = float(row[8])
            imu_data = np.array([imu_yaw_rate, imu_accel])

            last_time = cur_time

            # prediction is pretty simple
            state_estimator.predict(d_time)

            # updating isn't bad either
            # remember that the updated states should be zero-indexed
            # the states should also be in the order of the noise and data matrices
            state_estimator.update([4, 5], imu_data, r_imu)
            state_estimator.update([2], compass_data, r_compass)
            state_estimator.update([3], encoder_data, r_encoder)

            print ("--------------------------------------------------------")
            print ("Real state: ", real_state)
            print ("Estimated state: ", state_estimator.get_state())
            print ("Difference: ", real_state - state_estimator.get_state())
        i+=1
Example #16
import animation
reload(animation)
from animation import Animation

# import plotData
# reload(plotData)
# from plotData import plotData

import ukf
reload(ukf)
from ukf import UKF

dynamics = Dynamics()
animation = Animation()
# dataPlot = plotData()
ukf = UKF()

states = dynamics.states()
mu = ukf.get_mu()
sig = ukf.get_sig()
Ks = ukf.get_k()

t = P.t_start
while t < P.t_end:
    t_next_plot = t + P.t_plot
    while t < t_next_plot:
        t = t + P.Ts
        vc = 1 + 0.5 * np.cos(2 * np.pi * (0.2) * t)
        omegac = -0.2 + 2 * np.cos(2 * np.pi * (0.6) * t)
        noise_v = vc + np.random.normal(
            0, np.sqrt(P.alpha1 * vc**2 + P.alpha2 * omegac**2))
Example #17
    def ukf_move_wheelchair(self):

        while rospy.get_time() == 0.0:
            continue

        rospy.sleep(1)

        def fx(x, dt):
            x[0], x[1] = normalize_angle(x[0]), normalize_angle(x[1])
            self.prev_alpha1 = x[0]
            self.prev_alpha2 = x[1]

            return self.caster_model_ukf(x, dt)

        def hx(x):
            # print "2: ", self.prev_alpha1
            delta_alpha1 = x[0] - self.prev_alpha1
            delta_alpha2 = x[1] - self.prev_alpha2
            alpha1dot = delta_alpha1 / self.dt
            alpha2dot = delta_alpha2 / self.dt

            sol = self.meas_model(x[0], x[1], alpha1dot, alpha2dot)
            return sol

        self.ini_pose = [
            self.wheel_cmd.angular.z, -self.wheel_cmd.linear.x, -self.odom_y,
            self.odom_x, self.odom_th
        ]
        self.save_pose_data.append(
            [self.ini_pose[2], self.ini_pose[3], self.ini_pose[4]])

        points = MerweScaledSigmaPoints(n=2, alpha=.1, beta=1., kappa=-1.)
        kf = UKF(dim_x=2,
                 dim_z=2,
                 dt=self.dt,
                 fx=fx,
                 hx=hx,
                 points=points,
                 sqrt_fn=None,
                 x_mean_fn=self.state_mean,
                 z_mean_fn=self.meas_mean,
                 residual_x=self.residual_x,
                 residual_z=self.residual_z)

        # self.ini_val = [normalize_angle(self.l_caster_angle-np.pi), normalize_angle(self.r_caster_angle-np.pi)]
        self.ini_val = [3.1, -3.14]
        self.save_caster_data.append(self.ini_val)

        kf.x = np.array(self.ini_val)  # initial mean state
        kf.P *= 0.0001  # kf.P = eye(dim_x) ; adjust covariances if necessary
        # kf.R *= 0
        # kf.Q *= 0

        zs = []
        xs = []

        # xs.append(self.ini_val)

        count = 0

        # print "Est1: ", normalize_angle(kf.x[0]+np.pi), normalize_angle(kf.x[1]+np.pi)

        rospy.loginfo("Moving robot...")

        last_odom_x = self.odom_x
        last_odom_th = self.odom_th
        start = rospy.get_time()
        self.r.sleep()

        while (rospy.get_time() - start <
               self.move_time) and not rospy.is_shutdown():
            curr_odom_x, curr_odom_th = self.odom_x, self.odom_th
            delta_x, delta_th = curr_odom_x - last_odom_x, curr_odom_th - last_odom_th
            z = np.array([delta_x / self.dt, delta_th / self.dt])
            # z = np.array([self.odom_vx, self.odom_vth])

            if count % self.factor == 0:

                zs.append(z)

                kf.predict()
                kf.update(z)

                xs.append([
                    normalize_angle(kf.x[0] + np.pi),
                    normalize_angle(kf.x[1] + np.pi)
                ])

                # print "Est: ", normalize_angle(kf.x[0]+np.pi), normalize_angle(kf.x[1]+np.pi)
                # print "Act: ", normalize_angle(self.l_caster_angle), normalize_angle(self.r_caster_angle)

            self.save_caster_data.append(
                [self.l_caster_angle, self.r_caster_angle])
            self.save_pose_data.append(
                [self.odom_x, self.odom_y, self.odom_th])
            # print len(self.save_caster_data)

            self.pub_twist.publish(self.wheel_cmd)

            count += 1
            last_odom_x, last_odom_th = curr_odom_x, curr_odom_th
            self.r.sleep()

        # Stop the robot
        self.pub_twist.publish(Twist())

        self.caster_sol_ukf = np.array(xs)
        self.caster_sol_act = np.array(self.save_caster_data)
        self.pose_act = np.array(self.save_pose_data)

        # self.pose_act = np.reshape(self.pose_act, (len(self.save_pose_data), 3))

        rospy.sleep(1)
Example #18
def main():
    np.set_printoptions(precision=3)

    # Process Noise
    q = np.eye(5)
    q[0][0] = 0.001
    q[1][1] = 0.001
    q[2][2] = 0.001
    q[3][3] = 0.001
    q[4][4] = 0.001
    # q[5][5] = 0.0025
    realx = []
    realy = []
    realv = []
    realtheta = []
    realw = []
    estimatex = []
    estimatey = []
    estimatev = []
    estimatetheta = []
    estimatew = []
    estimate2y = []
    estimate2x = []
    estimatev2 = []
    realyaw = []
    estimateyaw = []
    estimateyaw2 = []
    realyawr = []
    estimateyawr = []
    estimateyawr2 = []
    estimate3x = []
    estimate3y = []
    ex = []
    ey = []
    # create measurement noise covariance matrices
    r_imu = np.zeros([1, 1])
    r_imu[0][0] = 0.001
    r_compass = np.zeros([1, 1])
    r_compass[0][0] = 0.001
    r_encoder = np.zeros([1, 1])
    r_encoder[0][0] = 0.001
    r_gpsx = np.zeros([1, 1])
    r_gpsx[0][0] = 0.001
    r_gpsy = np.zeros([1, 1])
    r_gpsy[0][0] = 0.001

    ini = np.array([0.000, 0.000, 0.000, 0.000, 0.000])
    xest = np.array([[0.000], [0.000], [0.000], [0.000], [0.000]])

    pest = np.eye(5)
    jf = np.eye(5)

    # generate particles from a Gaussian distribution
    p = 100
    x_p = np.zeros((p, 5))
    pw = np.zeros((p, 5))
    x_p_update = np.zeros((p, 5))
    z_p_update = np.zeros((p, 5))
    for i in range(0, p):
        x_p[i] = np.random.randn(5)

    xp = np.zeros((1, 5))

    # pass all the parameters into the UKF!
    # number of state variables, process noise, initial state, initial covariance, three tuning parameters, and the iterate function
    state_estimator = UKF(5, q, ini, np.eye(5), 0.04, 0.0, 2.0, iterate_x)

    with open('example.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)

        last_time = 0
        # read data
        for row in reader:
            row = [float(x) for x in row]

            cur_time = row[0]
            d_time = cur_time - last_time
            real_state = np.array([row[i] for i in [5, 6, 3, 4, 2]])

            # create an array for the data from each sensor
            compass_hdg = row[9]
            compass_data = np.array([compass_hdg])

            encoder_vel = row[10]
            encoder_data = np.array([encoder_vel])

            gps_x = row[11]
            gpsx_data = np.array([gps_x])

            gps_y = row[12]
            gpsy_data = np.array([gps_y])

            imu_yaw_rate = row[8]
            imu_data = np.array([imu_yaw_rate])

            last_time = cur_time

            observation_data = np.array([[row[11]], [row[12]], [row[10]],
                                         [row[9]], [row[8]]])

            observation_con = np.eye(5)
            observation_con[0][0] = 0.001
            observation_con[1][1] = 0.001
            observation_con[2][2] = 0.001
            observation_con[3][3] = 0.001
            observation_con[4][4] = 0.001

            # prediction is pretty simple
            state_estimator.predict(d_time)

            # updating isn't bad either
            # remember that the updated states should be zero-indexed
            # the states should also be in the order of the noise and data matrices
            state_estimator.update([4], imu_data, r_imu)
            state_estimator.update([3], compass_data, r_compass)
            state_estimator.update([2], encoder_data, r_encoder)
            state_estimator.update([1], gpsy_data, r_gpsy)
            state_estimator.update([0], gpsx_data, r_gpsx)

            #ekf
            ret2 = state_estimator.motion_model(xest)
            jf = state_estimator.jacobi(ret2)
            xest, pest = state_estimator.ekf_estimation(
                xest, pest, observation_data, observation_con, jf, ret2)
            """           
            #particle filter
            for i in range(0,p):
                x_p_update[i,:] = motion_model2(x_p[i,:]) 
                z_p_update[i,:] = x_p_update[i,:]
                pw[i] = cul_weight(observation_data,z_p_update[i,:])
            
            sum1=0.000
            sum2=0.000
            sum3=0.000
            sum4=0.000
            sum5=0.000
            
            for i in range(0,p):
                sum1=sum1+pw[i,0]
                sum2=sum2+pw[i,1]
                sum3=sum3+pw[i,2]
                sum4=sum4+pw[i,3]
                sum5=sum5+pw[i,4]
                
            #normalize
            for i in range(0,p):
                pw[i,0]=pw[i,0]/sum1
                pw[i,1]=pw[i,1]/sum2
                pw[i,2]=pw[i,2]/sum3
                pw[i,3]=pw[i,3]/sum4
                pw[i,4]=pw[i,4]/sum5
            
            #resampling
            for i in range(0,p):
                u = np.random.rand(1,5)
                qt1=np.zeros((1,5))
                for j in range(0,p):
                    qt1=qt1+pw[j]
                    if np.all(qt1) > np.all(u):
                        x_p[i]=x_p_update[j]
                        break
            xest2 =np.zeros((1,5))
            for i in range(0,p):
                xest2[0,0]=xest2[0,0]+x_p[i,0]
                xest2[0,1]=xest2[0,1]+x_p[i,1]
                xest2[0,2]=xest2[0,2]+x_p[i,2]
                xest2[0,3]=xest2[0,3]+x_p[i,3]
                xest2[0,4]=xest2[0,4]+x_p[i,4]
            
            xest2[0,0] = xest2[0,0] / p
            xest2[0,1] = xest2[0,1] / p
            xest2[0,2] = xest2[0,2] / p
            xest2[0,3] = xest2[0,3] / p
            xest2[0,4] = xest2[0,4] / p
            """

            print("----------------------------------------------------------")
            print("Real state: ", real_state)
            print("UKF Estimated state: ", state_estimator.get_state())
            print("EKF Estimated state: ", xest.T)

            ex.append(row[11])
            ey.append(row[12])
            realx.append(real_state[0])
            realy.append(real_state[1])
            estimatex.append(state_estimator.get_state(0))
            estimatey.append(state_estimator.get_state(1))
            realv.append(real_state[2])
            estimatev.append(state_estimator.get_state(2))
            estimate2x.append(xest[0, 0])
            estimate2y.append(xest[1, 0])
            estimatev2.append(xest[2, 0])
            realyaw.append(real_state[3])
            estimateyaw.append(state_estimator.get_state(3))
            estimateyaw2.append(xest[3, 0])
            realyawr.append(real_state[4])
            estimateyawr.append(state_estimator.get_state(4))
            estimateyawr2.append(xest[4, 0])

    #figl=plt.figure(4)
    #plt.subplot(411)
    plt.plot(realx, realy, label="real_state")
    plt.plot(estimatex, estimatey, label="ufk_estimator")
    plt.plot(estimate2x, estimate2y, label="efk_estimator")
    #plt.plot(ex,ey,label="estimator")
    #plt.plot(estimate3x,estimate3y,label="pk_estimator")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend(loc="best")
    #plt.xlim(0,2)
    #plt.ylim(0,2)
    """
    plt.subplot(412)
    plt.plot(realv)
    plt.plot(estimatev)
    plt.plot(estimatev2)

    plt.xlabel("sample")
    plt.ylabel("v")   
    
    plt.subplot(413)
    plt.plot(realyaw)
    plt.plot(estimateyaw)
    plt.plot(estimateyaw2)
    plt.xlabel("sample")
    plt.ylabel("yaw")    
    
    plt.subplot(414)
    plt.plot(realyawr)
    plt.plot(estimateyawr)
    plt.plot(estimateyawr2)
    plt.xlabel("sample")
    plt.ylabel("yawr")
    """
    sigma1 = 0
    sigma2 = 0
    sigma3 = 0
    sigma4 = 0
    sigma5 = 0
    sigma6 = 0
    sigma7 = 0
    sigma8 = 0
    sigma9 = 0
    sigma10 = 0
    for i in range(0, len(realx)):
        sigma1 += (realx[i] - estimatex[i])**2
        sigma2 += (realy[i] - estimatey[i])**2
        sigma3 += (realx[i] - estimate2x[i])**2
        sigma4 += (realx[i] - estimate2y[i])**2
        sigma5 += (realx[i] - estimatev[i])**2
        sigma6 += (realy[i] - estimatev2[i])**2
        sigma7 += (realx[i] - estimateyaw[i])**2
        sigma8 += (realx[i] - estimateyaw2[i])**2
        sigma9 += (realx[i] - estimateyawr[i])**2
        sigma10 += (realy[i] - estimateyawr2[i])**2

    REME_x = math.sqrt(sigma1 / len(realx))
    REME_y = math.sqrt(sigma2 / len(realx))
    REME_x2 = math.sqrt(sigma3 / len(realx))
    REME_y2 = math.sqrt(sigma4 / len(realx))
    REME_v1 = math.sqrt(sigma5 / len(realx))
    REME_v2 = math.sqrt(sigma6 / len(realx))
    REME_y1 = math.sqrt(sigma7 / len(realx))
    REME_y2 = math.sqrt(sigma8 / len(realx))
    REME_yr1 = math.sqrt(sigma9 / len(realx))
    REME_yr2 = math.sqrt(sigma10 / len(realx))

    print("RMSE(ukf)_x=", REME_x, "RMSE(ekf)_x=", REME_x2)
    print("RMSE(ukf)_y=", REME_y, "RMSE(ekf)_y=", REME_y2)
    print("RMSE(ukf)_v=", REME_v1, "RMSE(ekf)_v=", REME_v2)
    print("RMSE(ukf)_yaw=", REME_y1, "RMSE(ekf)_yaw=", REME_y2)
    print("RMSE(ukf)_yawr=", REME_yr1, "RMSE(ekf)_yawr=", REME_yr2)
    """
Example #19
0
File: new.py Project: nerv8761/kf
def main():
    np.set_printoptions(precision=3)

    # Process Noise
    q = np.eye(5)
    q[0][0] = 0.001
    q[1][1] = 0.001
    q[2][2] = 0.004
    q[3][3] = 0.025
    q[4][4] = 0.025
    # q[5][5] = 0.0025
    realx = []
    realy = []
    realv = []
    realtheta = []
    realw = []
    estimatex = []
    estimatey = []
    estimatev = []
    estimatetheta = []
    estimatew = []
    estimate2y = []
    estimate2x = []

    # create measurement noise covariance matrices
    r_imu = np.zeros([1, 1])
    r_imu[0][0] = 0.01

    r_compass = np.zeros([1, 1])
    r_compass[0][0] = 0.02

    r_encoder = np.zeros([1, 1])
    r_encoder[0][0] = 0.001

    r_gpsx = np.zeros([1, 1])
    r_gpsx[0][0] = 0.1
    r_gpsy = np.zeros([1, 1])
    r_gpsy[0][0] = 0.1

    ini = np.array([0, 0, 0.3, 0, 0.3])

    # pass all the parameters into the UKF!
    # number of state variables, process noise, initial state, initial covariance, three tuning parameters, and the iterate function
    state_estimator = UKF(5, q, ini, np.eye(5), 0.04, 0.0, 2.0, iterate_x,
                          r_imu, r_compass, r_encoder, r_gpsx, r_gpsy)

    xest = np.zeros((5, 1))
    pest = np.eye(5)
    jf = []

    with open('example.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)

        last_time = 0
        # read data
        for row in reader:
            row = [float(x) for x in row]

            cur_time = row[0]
            d_time = cur_time - last_time
            real_state = np.array([row[i] for i in [5, 6, 3, 4, 2]])

            # create an array for the data from each sensor
            compass_hdg = row[9]
            compass_data = np.array([compass_hdg])

            encoder_vel = row[10]
            encoder_data = np.array([encoder_vel])

            gps_x = row[11]
            gpsx_data = np.array([gps_x])

            gps_y = row[12]
            gpsy_data = np.array([gps_y])

            imu_yaw_rate = row[8]
            imu_data = np.array([imu_yaw_rate])

            last_time = cur_time

            observation_data = np.array(
                [row[11], row[12], row[10], row[9], row[8]])
            observation_datac = np.array([0.1, 0.1, 0.001, 0.02, 0.01])

            # prediction is pretty simple
            state_estimator.predict(d_time)

            # updating isn't bad either
            # remember that the updated states should be zero-indexed
            # the states should also be in the order of the noise and data matrices
            state_estimator.update([4], imu_data, r_imu)
            state_estimator.update([3], compass_data, r_compass)
            state_estimator.update([2], encoder_data, r_encoder)
            state_estimator.update([1], gpsy_data, r_gpsy)
            state_estimator.update([0], gpsx_data, r_gpsx)
            jf = state_estimator.jacobi(xest)

            xest, pest = state_estimator.ekf_estimation(
                xest, pest, observation_data, observation_datac, jf)

            print("--------------------------------------------------------")
            print("Real state: ", real_state)
            print("Estimated state: ", state_estimator.get_state())
            print("Difference: ", real_state - state_estimator.get_state())
            print("Estimated state2: ", xest)

            realx.append(real_state[0])
            realy.append(real_state[1])
            estimatex.append(state_estimator.get_state(0))
            estimatey.append(state_estimator.get_state(1))
            realv.append(real_state[2])
            estimatev.append(state_estimator.get_state(2))
            estimate2x.append(xest[0])
            estimate2y.append(xest[1])

    figl = plt.figure(2)
    plt.subplot(211)
    plt.plot(estimatex, estimatey, "-b", label="ufk_estimator")
    plt.plot(realx, realy, "-r", label="real_position")
    plt.plot(estimate2x, estimate2y, "-g", label="efk_estimator")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend(loc="best")

    plt.subplot(212)
    plt.plot(realv, "-r", label="real_v")
    plt.plot(estimatev, "-b", label="estimator_v")
    plt.legend(loc="best")
Example #20

if __name__ == "__main__":

    delta_t = 1.
    position = np.array([0., 0.])
    velocity = np.array([0., 0.])
    acceleration = np.array([1., 0.])  # randomly set ~N(0, 1)
    variance = np.matlib.identity(2, dtype=float) * ahrs8_noise
    measurement = np.array([0., 0.])  # randomly set ~N(position, 1)
    true_acceleration = np.array([1., 0.])
    true_velocity = np.array([0., 0.])
    true_final_velocity = np.array([0., 0.])
    true_position = np.array([0., 0.])

    ukf = UKF(R, Q, G, H)

    for i in range(1, 100):
        acceleration = np.array([
            gauss(true_acceleration[0], ahrs8_noise),
            gauss(true_acceleration[1], ahrs8_noise)
        ])
        final_velocity = np.array([
            velocity[j] + (delta_t * acceleration[j])
            for j in range(len(position))
        ])
        #measurement = np.array([gauss(true_position[k], 0.1) for k in range(len(position))])
        measurement = np.array([
            true_position[k] + delta_t *
            ((final_velocity[k] + velocity[k]) / 2)
            for k in range(len(position))
        [twr.Getx()[1],
         twr.Getx()[1] + body_radius * np.sin(twr.Getx()[2])])
    robot_head.set_xdata(headx)
    robot_head.set_ydata(heady)

    fig.canvas.draw()
    plt.pause(0.01)


if __name__ == "__main__":

    # Two-Wheeled Robot Init
    twr = TWR()  # TWR model object

    # Kalman Filter Init
    ukf = UKF(twr.c, twr.nl)

    body_radius = 0.3
    fig, lines, lines_est, msensor, robot_body, robot_head = InitPlot(
        twr, body_radius)
    mu = ukf.mu
    K = ukf.K
    two_sig_x = np.array([[2 * np.sqrt(ukf.cov.item((0, 0)))],
                          [-2 * np.sqrt(ukf.cov.item((0, 0)))]])
    two_sig_y = np.array([[2 * np.sqrt(ukf.cov.item((1, 1)))],
                          [-2 * np.sqrt(ukf.cov.item((1, 1)))]])
    two_sig_theta = np.array([[2 * np.sqrt(ukf.cov.item((2, 2)))],
                              [-2 * np.sqrt(ukf.cov.item((2, 2)))]])
    for i in range(int(twr.t_end / twr.dt)):
        # truth model updates
        twr.Propagate()