tf.summary.scalar( "theta_loss", tf.reduce_mean(params.THETA_SCALING * error[:, :, params.THETA_IND])) tf.summary.scalar("x_loss_rel", tf.reduce_mean(error_relative[:, :, params.X_IND])) tf.summary.scalar("y_loss_rel", tf.reduce_mean(error_relative[:, :, params.Y_IND])) tf.summary.scalar("theta_loss_rel", tf.reduce_mean(error_relative[:, :, params.THETA_IND])) return loss if __name__ == "__main__": # Read the input data t_chunks, state_chunks, control_chunks, p_chunks = read_data.read_chunks( params.TRAIN_DIR) t_chunks_val, state_chunks_val, control_chunks_val, p_chunks_val = read_data.read_chunks( params.VALIDATION_DIR) # Compute the average time step h = np.mean(np.diff(t_chunks[0])) # Make placeholders h_ph = tf.placeholder(tf.float32, shape=(), name="h") training_ph = tf.placeholder(tf.bool, name="training") state_batch_ph = tf.placeholder(tf.float32, shape=(params.BATCH_SIZE, params.STATE_STEPS, params.STATES), name="state_batch") control_batch_ph = tf.placeholder(tf.float32, shape=(params.BATCH_SIZE,
            scale=None, headwidth=3., width=0.002, color=next(colors))
    plt.show()

if __name__ == "__main__":
    """
    Test basic plotting.
    """
    import read_data

    # Read the data.
    t_chunks, state_chunks, control_chunks, p_chunks = read_data.read_chunks(
        params.TRAIN_DIR)

    # Plot a couple of states.
    states = []
    for i in range(4):
        states.append(np.expand_dims(state_chunks[i], axis=0))
    plot_states(states)

    # Plot the states zoomed in.
    states = []
    chunk = np.expand_dims(state_chunks[0], axis=0)
    for i in range(3):
        states.append(
            chunk[:, i * params.STATE_STEPS:(i + 1) * params.STATE_STEPS])
    plot_states(states)
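
# A minimal, self-contained approximation of the quiver-style plot used
# above (an assumption for illustration; the project's plot_states may
# differ). Each state row is taken to be (x, y, theta, ...) and drawn as an
# arrow at (x, y) pointing along theta; the default index arguments stand in
# for params.X_IND, params.Y_IND, and params.THETA_IND.
def plot_states_sketch(states, x_ind=0, y_ind=1, theta_ind=2):
    colors = iter(plt.cm.tab10(np.linspace(0., 1., len(states))))
    for chunk in states:
        # Flatten (batch, steps, state_dim) into rows of states.
        flat = chunk.reshape(-1, chunk.shape[-1])
        x, y = flat[:, x_ind], flat[:, y_ind]
        theta = flat[:, theta_ind]
        plt.quiver(x, y, np.cos(theta), np.sin(theta),
                   scale=None, headwidth=3., width=0.002, color=next(colors))
    plt.axis("equal")
    plt.show()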
        feed_dict[self.i_ph] = i
        feed_dict[self.control_check_batch_ph] = control_check_batch
        feed_dict[self.state_batch_ph] = state_batch
        feed_dict[self.control_batch_ph] = control_batch

        i, state_batch, control_batch = self.sess.run(
            (self.i, self.state_batch, self.control_batch),
            feed_dict=feed_dict)

        return i, state_batch, control_batch

if __name__ == "__main__":
    m = TestDriftModel()

    t_chunks, state_chunks, control_chunks, p_chunks = read_data.read_chunks(
        params.VALIDATION_DIR)

    for i in range(10):
        # Make a random input batch.
        state_batch, control_batch, state_check_batch, control_check_batch = process_data.random_batch(
            state_chunks, control_chunks, p_chunks)

        # Compute a baseline error by extrapolating the final
        # state by simply integrating out the state differences.
        loss_base = state_check_batch[:, -1] - (
            state_batch[:, -1] + params.CHECK_STEPS *
            (state_batch[:, -1] - state_batch[:, -2]))

        # Use the learned model.
        i, state_batch, control_batch = m.compute_f(
            0, state_batch, control_batch, control_check_batch)
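        # A hedged comparison sketch: assuming the state_batch returned by
        # compute_f has been rolled forward so that its final entry lines up
        # with the final check state, the learned model's error can be put
        # side by side with the integration baseline above.
        loss_model = state_check_batch[:, -1] - state_batch[:, -1]
        print("baseline |err|: %f, model |err|: %f" %
              (np.mean(np.abs(loss_base)), np.mean(np.abs(loss_model))))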