def build_seq_model(cfg, inputs, lstm_initial_state, initial_poses, imu_data, ekf_initial_state, ekf_initial_covariance, is_training, get_activations=False, use_initializer=False, use_ekf=False):
    """Build the sequence model graph: CNN -> (optional initializer) -> RNN -> FC -> (optional EKF) -> SE3.

    When ``use_ekf`` is false, the FC outputs are used directly as the relative
    displacement/covariance and dummy (zero/identity) EKF tensors are returned
    so the output signature stays the same in both modes.

    Returns:
        (rel_disp, rel_covar, se3_outputs, lstm_states,
         last_ekf_state, last_ekf_covar)
    """
    print("Building CNN...")
    cnn_outputs = cnn_layer(inputs, cnn_model_lidar, is_training, get_activations)

    # Branch bodies for tf.cond: either run the learned initializer network,
    # or feed through the externally supplied initial states unchanged.
    def f1():
        return initializer_layer(cnn_outputs, cfg)

    def f2():
        return lstm_initial_state, ekf_initial_state, ekf_initial_covariance

    # use_initializer is a (presumably boolean) tensor, so the choice is made
    # at session run time, not at graph-build time.
    feed_init_states, feed_ekf_init_state, feed_ekf_init_covar = tf.cond(
        use_initializer, true_fn=f1, false_fn=f2)

    print("Building RNN...")
    lstm_outputs, lstm_states = rnn_layer(cfg, cnn_outputs, feed_init_states)

    print("Building FC...")
    fc_outputs = fc_layer(lstm_outputs, fc_model)

    # Turn the last 6 FC channels (sqrt-covariance values) into per-step 6x6
    # diagonal covariance matrices; squaring keeps the diagonal nonnegative.
    # Loops are over static (graph-build-time) batch/time dimensions.
    stack1 = []
    for i in range(fc_outputs.shape[0]):
        stack2 = []
        for j in range(fc_outputs.shape[1]):
            stack2.append(tf.diag(tf.square(fc_outputs[i, j, 6:])))
        stack1.append(tf.stack(stack2, axis=0))
    nn_covar = tf.stack(stack1, axis=0)

    if use_ekf:
        print("Building EKF...")
        # at this point the outputs from the fully connected layer are [x, y, z, yaw, pitch, roll, 6 x covars]
        # Reuse the IMU noise parameters created elsewhere in this scope
        # (see the build variant that constructs them).
        with tf.variable_scope("imu_noise_params", reuse=True):
            gyro_bias_diag = tf.get_variable('gyro_bias_sqrt')
            acc_bias_diag = tf.get_variable('acc_bias_sqrt')
            gyro_covar_diag = tf.get_variable('gyro_sqrt')
            acc_covar_diag = tf.get_variable('acc_sqrt')

        # Square the sqrt parameters and add a small floor (1e-4) so the
        # covariances stay strictly positive definite.
        gyro_bias_covar = tf.diag(tf.square(gyro_bias_diag) + 1e-4)
        acc_bias_covar = tf.diag(tf.square(acc_bias_diag) + 1e-4)
        gyro_covar = tf.diag(tf.square(gyro_covar_diag) + 1e-4)
        acc_covar = tf.diag(tf.square(acc_covar_diag) + 1e-4)

        ekf_out_states, ekf_out_covar = ekf.full_ekf_layer(
            imu_data, fc_outputs[..., 0:6], nn_covar, feed_ekf_init_state,
            feed_ekf_init_covar, gyro_bias_covar, acc_bias_covar, gyro_covar,
            acc_covar)

        # Slice position (0:3) and orientation (11:14) out of the full EKF
        # state, dropping the first timestep; assemble the matching 6x6
        # covariance from the corresponding blocks of the full covariance.
        rel_disp = tf.concat(
            [ekf_out_states[1:, :, 0:3], ekf_out_states[1:, :, 11:14]], axis=-1)
        rel_covar = tf.concat([
            tf.concat([
                ekf_out_covar[1:, :, 0:3, 0:3], ekf_out_covar[1:, :, 0:3, 11:14]
            ], axis=-1),
            tf.concat([
                ekf_out_covar[1:, :, 11:14, 0:3], ekf_out_covar[1:, :, 11:14, 11:14]
            ], axis=-1)
        ], axis=-2)
    else:
        # No EKF: the FC pose outputs are the relative displacements, and
        # placeholder EKF tensors keep the return signature consistent.
        rel_disp = fc_outputs[..., 0:6]
        rel_covar = nn_covar
        ekf_out_states = tf.zeros(
            [fc_outputs.shape[0], fc_outputs.shape[1], 17], dtype=tf.float32)
        ekf_out_covar = tf.eye(
            17, batch_shape=[fc_outputs.shape[0], fc_outputs.shape[1]],
            dtype=tf.float32)

    print("Building SE3...")
    # at this point the outputs are the relative states with covariance, need to only select the part the
    # loss cares about
    se3_outputs = se3_layer(rel_disp, initial_poses)

    return rel_disp, rel_covar, se3_outputs, lstm_states, ekf_out_states[-1, ...], ekf_out_covar[-1, ...]
def build_seq_model(cfg, inputs, lstm_initial_state, initial_poses, imu_data, ekf_initial_state, ekf_initial_covariance, dt, is_training, get_activations=False, use_initializer=False, use_ekf=False, fc_labels=None):
    """Build the sequence model graph: CNN -> (optional initializer) -> RNN -> FC -> (optional EKF) -> SE3.

    Extended variant: takes per-step timestamps/intervals ``dt`` for the EKF,
    can bypass the network with ground-truth FC labels
    (``cfg.train_ekf_with_fcgt``), and can pin the FC covariance channels to a
    fixed value (``cfg.fix_fc_covar``).

    Returns:
        (rel_disp, rel_covar, se3_outputs, lstm_states,
         last_ekf_state, last_ekf_covar,
         feed_init_states, feed_ekf_init_state, feed_ekf_init_covar)
    """
    print("Building CNN...")
    cnn_outputs = cnn_layer(inputs, cnn_model_lidar, is_training, get_activations)

    # Branch bodies for tf.cond: either run the learned initializer network,
    # or feed through the externally supplied initial states unchanged.
    def f1():
        return initializer_layer(cnn_outputs, cfg)

    def f2():
        return lstm_initial_state, ekf_initial_state, ekf_initial_covariance

    with tf.name_scope("use_initializer_cond"):
        # Runtime (session-time) choice between initializer and fed states.
        feed_init_states, feed_ekf_init_state, feed_ekf_init_covar = tf.cond(
            use_initializer, true_fn=f1, false_fn=f2)

    print("Building RNN...")
    lstm_outputs, lstm_states = rnn_layer(cfg, cnn_outputs, feed_init_states)

    print("Building FC...")
    # if we want to train ekf with ground truth, by passing all previous layers
    if cfg.train_ekf_with_fcgt:
        fc_outputs = fc_labels
    else:
        fc_outputs = fc_layer(lstm_outputs, fc_model)

    # if we want to fix covariances in fc_outputs
    if cfg.fix_fc_covar:
        with tf.name_scope("fix_ekf_covar"):
            # Tile the configured fixed covariance over the (batch, time)
            # leading dims and splice it in place of the FC covariance channels.
            fc_outputs_shape = fc_outputs.get_shape().as_list()
            fixed_covar = np.stack([cfg.fc_covar_fix_val] * fc_outputs_shape[1])
            fixed_covar = np.stack([fixed_covar] * fc_outputs_shape[0])
            fc_outputs = tf.concat(
                [fc_outputs[:, :, 0:6], tf.constant(fixed_covar, tf.float32)],
                axis=2)

    with tf.name_scope("stack_for_ekf"):
        # Turn the last 6 FC channels (sqrt-covariance values) into per-step
        # 6x6 diagonal covariance matrices. Squaring keeps the diagonal
        # nonnegative; the small additive floor keeps it strictly positive
        # (looser for translation, tighter for rotation).
        stack1 = []
        for i in range(fc_outputs.shape[0]):
            stack2 = []
            for j in range(fc_outputs.shape[1]):
                stack2.append(
                    tf.diag(
                        tf.square(fc_outputs[i, j, 6:]) +
                        np.array([1e-6, 1e-6, 1e-6, 1e-7, 1e-7, 1e-7])))
            stack1.append(tf.stack(stack2, axis=0))
        nn_covar = tf.stack(stack1, axis=0)

    if use_ekf:
        print("Building EKF...")
        # at this point the outputs from the fully connected layer are [x, y, z, yaw, pitch, roll, 6 x covars]
        # Reuse the IMU noise parameters created elsewhere in this scope.
        with tf.variable_scope("imu_noise_params", reuse=True):
            gyro_bias_diag = tf.get_variable('gyro_bias_sqrt')
            acc_bias_diag = tf.get_variable('acc_bias_sqrt')
            gyro_covar_diag = tf.get_variable('gyro_sqrt')
            acc_covar_diag = tf.get_variable('acc_sqrt')

        with tf.name_scope("ekf_ops"):
            # Square the sqrt parameters; 1e-8 floor keeps the matrices
            # strictly positive definite.
            gyro_bias_covar = tf.diag(tf.square(gyro_bias_diag) + 1e-8)
            acc_bias_covar = tf.diag(tf.square(acc_bias_diag) + 1e-8)
            gyro_covar = tf.diag(tf.square(gyro_covar_diag) + 1e-8)
            acc_covar = tf.diag(tf.square(acc_covar_diag) + 1e-8)

            ekf_out_states, ekf_out_covar = ekf.full_ekf_layer(
                imu_data, fc_outputs[..., 0:6], nn_covar, feed_ekf_init_state,
                feed_ekf_init_covar, gyro_bias_covar, acc_bias_covar,
                gyro_covar, acc_covar, dt)

            # Slice position (0:3) and orientation (11:14) out of the full EKF
            # state, dropping the first timestep; assemble the matching 6x6
            # covariance from the corresponding blocks of the full covariance.
            rel_disp = tf.concat(
                [ekf_out_states[1:, :, 0:3], ekf_out_states[1:, :, 11:14]],
                axis=-1)
            rel_covar = tf.concat([
                tf.concat([
                    ekf_out_covar[1:, :, 0:3, 0:3],
                    ekf_out_covar[1:, :, 0:3, 11:14]
                ], axis=-1),
                tf.concat([
                    ekf_out_covar[1:, :, 11:14, 0:3],
                    ekf_out_covar[1:, :, 11:14, 11:14]
                ], axis=-1)
            ], axis=-2)
    else:
        with tf.name_scope("ekf_ops"):
            # No EKF: the FC pose outputs are the relative displacements, and
            # placeholder EKF tensors keep the return signature consistent.
            rel_disp = fc_outputs[..., 0:6]
            rel_covar = nn_covar
            ekf_out_states = tf.zeros(
                [fc_outputs.shape[0], fc_outputs.shape[1], 17],
                dtype=tf.float32)
            ekf_out_covar = tf.eye(
                17, batch_shape=[fc_outputs.shape[0], fc_outputs.shape[1]],
                dtype=tf.float32)

    print("Building SE3...")
    # at this point the outputs are the relative states with covariance, need to only select the part the
    # loss cares about
    se3_outputs = se3_layer(rel_disp, initial_poses)

    return rel_disp, rel_covar, se3_outputs, lstm_states, \
        ekf_out_states[-1, ...], ekf_out_covar[-1, ...], \
        feed_init_states, feed_ekf_init_state, feed_ekf_init_covar
def ekf_update():
    """Smoke-test the EKF: build the full EKF layer on canned stationary
    sample data and report success.

    Returns:
        True once the EKF ops have been constructed without raising.
    """
    # no_movement_sample_data() yields the nine EKF inputs in exactly the
    # order full_ekf_layer expects, so forward them positionally.
    sample_inputs = no_movement_sample_data()
    ekf.full_ekf_layer(*sample_inputs)
    return True
def build_seq_model(cfg, inputs, lstm_initial_state, initial_poses, imu_data, ekf_initial_state, ekf_initial_covariance, is_training, get_activations=False, use_initializer=False):
    """Build the sequence model graph: CNN -> (optional initializer) -> RNN -> FC -> EKF -> SE3.

    This variant creates the learned IMU noise parameters in the
    ``imu_noise_params`` variable scope (the sibling variants look them up
    with ``tf.get_variable(..., reuse=True)``).

    Returns:
        (rel_disp, rel_covar, se3_outputs, lstm_states,
         last_ekf_state, last_ekf_covar)
    """
    print("Building CNN...")
    cnn_outputs = cnn_layer(inputs, cnn_model_lidar, is_training, get_activations)

    # Branch bodies for tf.cond: either run the learned initializer network,
    # or feed through the externally supplied initial states unchanged.
    def f1():
        return initializer_layer(cnn_outputs, cfg)

    def f2():
        return lstm_initial_state, ekf_initial_state, ekf_initial_covariance

    # use_initializer is evaluated at session run time, not graph-build time.
    feed_init_states, feed_ekf_init_state, feed_ekf_init_covar = tf.cond(
        use_initializer, true_fn=f1, false_fn=f2)

    print("Building RNN...")
    lstm_outputs, lstm_states = rnn_layer(cfg, cnn_outputs, feed_init_states)

    print("Building FC...")
    fc_outputs = fc_layer(lstm_outputs, fc_model)

    print("Building EKF...")
    # at this point the outputs from the fully connected layer are [x, y, z, yaw, pitch, roll, 6 x covars]
    # NOTE(review): unlike the sibling build_seq_model variants, the last 6 FC
    # channels are used directly (not squared) as the covariance diagonal
    # here, so negative FC outputs would produce an invalid (non-PSD)
    # measurement covariance — confirm this is intentional.
    stack1 = []
    for i in range(fc_outputs.shape[0]):
        stack2 = []
        for j in range(fc_outputs.shape[1]):
            stack2.append(tf.diag(fc_outputs[i, j, 6:]))
        stack1.append(tf.stack(stack2, axis=0))
    nn_covar = tf.stack(stack1, axis=0)

    # Create (or reuse) the IMU noise parameters through the variable-scope
    # store. The previous code used tf.Variable / tfe.Variable here, which do
    # NOT register with variable_scope reuse: the AUTO_REUSE flag was a no-op
    # for them, rebuilding the graph created fresh unshared parameters, and a
    # later tf.get_variable('gyro_bias_sqrt', ...) lookup with reuse=True (as
    # done by the sibling build_seq_model variants) would fail.
    # tf.get_variable with a tensor initializer creates on first call and
    # reuses the same variables afterwards.
    with tf.variable_scope("imu_noise_params", reuse=tf.AUTO_REUSE):
        gyro_bias_diag = tf.get_variable(
            'gyro_bias_sqrt',
            initializer=tf.random_normal([3], stddev=0.1),
            trainable=False)
        acc_bias_diag = tf.get_variable(
            'acc_bias_sqrt',
            initializer=tf.random_normal([3], stddev=0.1),
            trainable=False)
        gyro_covar_diag = tf.get_variable(
            'gyro_sqrt',
            initializer=tf.random_normal([3], stddev=1),
            trainable=False)
        acc_covar_diag = tf.get_variable(
            'acc_sqrt',
            initializer=tf.random_normal([3], stddev=1),
            trainable=False)

    # Square the sqrt parameters so the noise covariance diagonals are
    # nonnegative.
    gyro_bias_covar = tf.diag(tf.square(gyro_bias_diag))
    acc_bias_covar = tf.diag(tf.square(acc_bias_diag))
    gyro_covar = tf.diag(tf.square(gyro_covar_diag))
    acc_covar = tf.diag(tf.square(acc_covar_diag))

    ekf_out_states, ekf_out_covar = ekf.full_ekf_layer(
        imu_data, fc_outputs[..., 0:6], nn_covar, feed_ekf_init_state,
        feed_ekf_init_covar, gyro_bias_covar, acc_bias_covar, gyro_covar,
        acc_covar)

    print("Building SE3...")
    # at this point the outputs from the ekf are the full states with covariance, need to only select the part the
    # loss cares about
    # Slice position (0:3) and orientation (11:14) out of the full EKF state,
    # dropping the first timestep; assemble the matching 6x6 covariance from
    # the corresponding blocks of the full covariance.
    rel_disp = tf.concat(
        [ekf_out_states[1:, :, 0:3], ekf_out_states[1:, :, 11:14]], axis=-1)
    rel_covar = tf.concat([
        tf.concat(
            [ekf_out_covar[1:, :, 0:3, 0:3], ekf_out_covar[1:, :, 0:3, 11:14]],
            axis=-1),
        tf.concat([
            ekf_out_covar[1:, :, 11:14, 0:3],
            ekf_out_covar[1:, :, 11:14, 11:14]
        ], axis=-1)
    ], axis=-2)

    se3_outputs = se3_layer(rel_disp, initial_poses)

    return rel_disp, rel_covar, se3_outputs, lstm_states, ekf_out_states[-1, ...], ekf_out_covar[-1, ...]
shape=[cfg.batch_size, 7]) dt = tf.placeholder(tf.float32, shape=[cfg.timesteps, cfg.batch_size], name="dt") stack1 = [] for i in range(fc_outputs.shape[0]): stack2 = [] for j in range(fc_outputs.shape[1]): stack2.append(tf.diag(tf.square(fc_outputs[i, j, 6:]))) stack1.append(tf.stack(stack2, axis=0)) nn_covar = tf.stack(stack1, axis=0) ekf_out_states, ekf_out_covar = ekf.full_ekf_layer( imu_data, fc_outputs[..., 0:6], nn_covar, ekf_initial_state, ekf_initial_covariance, gyro_bias_covar, acc_bias_covar, gyro_covar, acc_covar, dt) rel_disp = tf.concat( [ekf_out_states[1:, :, 0:3], ekf_out_states[1:, :, 11:14]], axis=-1) rel_covar = tf.concat([ tf.concat( [ekf_out_covar[1:, :, 0:3, 0:3], ekf_out_covar[1:, :, 0:3, 11:14]], axis=-1), tf.concat( [ekf_out_covar[1:, :, 11:14, 0:3], ekf_out_covar[1:, :, 11:14, 11:14]], axis=-1) ], axis=-2) se3_outputs = model.se3_layer(rel_disp, initial_poses)