def __init__(self, Nrr=150., Izz=10., Kt1=0.1, zb=0., Mqq=100., ycp=0.,
             xb=0., zcp=0., Yvv=100., yg=0., Ixx=10., Kt0=0.1, Xuu=1.,
             xg=0., Zww=100., W=15.4*9.81, m=15.4, B=15.4*9.81, zg=0.,
             Kpp=100., Qt1=-0.001, Qt0=0.001, Iyy=10., yb=0., xcp=0.1):
    # become dynamics model
    Dynamics.__init__(self)
    # constant parameters
    (self.Nrr, self.Izz, self.Kt1, self.zb, self.Mqq, self.ycp, self.xb,
     self.zcp, self.Yvv, self.yg, self.Ixx, self.Kt0, self.Xuu, self.xg,
     self.Zww, self.W, self.m, self.B, self.zg, self.Kpp, self.Qt1,
     self.Qt0, self.Iyy, self.yb, self.xcp) = (
        Nrr, Izz, Kt1, zb, Mqq, ycp, xb, zcp, Yvv, yg, Ixx, Kt0, Xuu, xg,
        Zww, W, m, B, zg, Kpp, Qt1, Qt0, Iyy, yb, xcp)
def body(a, beta):
    def curr_energy(z, aux=None):
        return (1 - beta) * init_energy(z) + beta * final_energy(z, aux=aux)

    last_x = a[1]
    w = a[2]
    v = a[3]
    if refresh:
        refreshed_v = v * tf.sqrt(1 - refreshment) \
            + tf.random_normal(tf.shape(v)) * tf.sqrt(refreshment)
    else:
        refreshed_v = tf.random_normal(tf.shape(v))
    w = w + beta_diff * (-final_energy(last_x, aux=aux)
                         + init_energy(last_x, aux=aux))
    dynamics = Dynamics(x_dim, energy_function=curr_energy, eps=step_size,
                        hmc=True, T=leapfrogs)
    Lx, Lv, px = dynamics.forward(last_x, aux=aux, init_v=refreshed_v)
    mask = (px - tf.random_uniform(tf.shape(px)) >= 0.)
    updated_x = tf.where(mask, Lx, last_x)
    updated_v = tf.where(mask, Lv, -Lv)
    return (px, updated_x, w, updated_v)
def stabSep(dyn):
    """
    See [1] for implementation details

    References:
        [1]: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=78129&tag=1
    """
    A_s, V, l = spl.schur(dyn.A, sort='lhp')
    B_s = V.T @ dyn.B
    C_s = dyn.C @ V

    min_lmbda = np.min(npl.eigvals(A_s))
    threshold = min_lmbda / 1e5
    l = sum(npl.eigvals(A_s) < threshold)  # remove close to unstable elements

    # stable components
    A_n = A_s[:l, :l]
    B_n = B_s[:l, :]
    C_n = C_s[:, :l]

    # cross terms
    A_c = A_s[:l, l:]

    # unstable components
    A_p = A_s[l:, l:]
    B_p = B_s[l:, :]
    C_p = C_s[:, l:]

    # obtain stable and unstable dynamic systems
    dyn_unstable = Dynamics(A_p, B_p, C_p)
    B_tilde = np.hstack([B_n, A_c])
    dyn_stable = Dynamics(A_n, B_tilde, C_n)

    return dyn_stable, dyn_unstable, V, l
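# Hedged usage sketch for stabSep: assumes the same Dynamics(A, B, C)
# state-space container that stabSep itself constructs, plus the np / npl / spl
# aliases for numpy, numpy.linalg and scipy.linalg used above. The system below
# is purely illustrative (one unstable mode at +0.5).
import numpy as np
import numpy.linalg as npl
import scipy.linalg as spl

A = np.diag([-2.0, -1.0, 0.5])
B = np.eye(3)
C = np.eye(3)
dyn = Dynamics(A, B, C)

dyn_stable, dyn_unstable, V, l = stabSep(dyn)
print(l, npl.eigvals(dyn_unstable.A))  # stable dimension and the unstable eigenvalue(s)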
def simulate(arm, traj, tf, dt_dyn, dt_control=None, u_inj=None):
    """
    Inputs:
        arm: arm object
        traj: desired trajectory
        tf: desired final simulation time
        dt_dyn: dt for the dynamics integration
        dt_control: desired dt for control
        u_inj: control injection
    """
    dynamics = Dynamics(arm, dt_dyn)
    # dynamics = Dynamics(arm, dt_dyn, noise_torsion=[1e-3, 0],
    #                     noise_bending=[1e-3, 0, 0, 0])

    if dt_control is None:
        dt_control = dt_dyn
    else:
        dt_control = max(np.floor(dt_control / dt_dyn) * dt_dyn, dt_dyn)
    T = int(dt_control / dt_dyn)

    dyn_reduced = Dynamics(arm, dt_control, n=arm.state.n)
    controller = Controller(dyn_reduced)

    state_list = []
    control_list = []

    t_steps = int(np.floor(tf / dt_dyn))
    t_arr = np.linspace(0, tf, t_steps + 1)
    for i, t in enumerate(t_arr):
        print('Progress: {:.2f}%'.format(float(i) / (len(t_arr) - 1) * 100),
              end='\r')

        # mode = finiteStateMachine(y, wp)
        # mode = 'none'
        # mode = 'damping'
        mode = 'mpc'

        if i % T == 0:
            j = i // T
            if u_inj is None or j not in u_inj:  # guard: u_inj defaults to None
                wp = traj[:, j]
                y = simulateMeasurements(arm)
                u = controller.controlStep(y, wp, mode)
            else:
                u = {}
                u['rot'] = u_inj[j]['rot']
                u['lat'] = u_inj[j]['lat']

        arm = dynamics.dynamicsStep(arm, u)
        state_list.append(copy.copy(arm.state))
        control_list.append(u)

    return state_list, control_list, t_arr
def update(self, player_pos):
    # player_pos = position of player = x, y
    player_x, player_y = player_pos[0], player_pos[1]
    x1, y1 = self.position()
    new_pos = Dynamics(x1, y1, player_x, player_y, self.frame, self.rect)
    new_cen_x, new_cen_y = new_pos.near_move(self.maxstep)
    new_cen_x, new_cen_y = new_pos.sphere_move(new_cen_x, new_cen_y)
    new_x, new_y = (new_cen_x - (self.rect.width / 2.0),
                    new_cen_y - (self.rect.height / 2.0))
    self.rect.topleft = new_x, new_y
def __init__(self, model, noisemodel, state, breakpoints=None):
    """
    May be used when noise is added to a problem, i.e. for stochastic
    differential equations. All responsibility for handling the stochastics
    is placed on a "noisemodel" function that is used together with the
    derivative model normally used by the solver methods of the solvers of
    the Dynamics and StiffDynamics classes.
    """
    # First take care of what is inherited:
    Dynamics.__init__(self, model, state, breakpoints)
    # Then add the separate noise model
    self.__noisemodel = noisemodel
def main():
    # instantiate dynamics
    dyn = Dynamics()

    # instantiate leg
    leg = Leg(dyn, alpha=0, bound=True)

    # set boundaries
    t0 = 0
    s0 = np.array([1000, 1000, *np.random.randn(4)], dtype=float)
    l0 = np.random.randn(len(s0))
    tf = 1000
    sf = np.zeros(len(s0))
    leg.set(t0, s0, l0, tf, sf)

    # define problem
    udp = Problem(leg, atol=1e-8, rtol=1e-8)
    prob = pg.problem(udp)

    # instantiate algorithm
    uda = pg7.snopt7(True, "/usr/lib/libsnopt7_c.so")
    uda.set_integer_option("Major iterations limit", 4000)
    uda.set_integer_option("Iterations limit", 40000)
    uda.set_numeric_option("Major optimality tolerance", 1e-2)
    uda.set_numeric_option("Major feasibility tolerance", 1e-4)
    algo = pg.algorithm(uda)

    # instantiate population with one chromosome
    pop = pg.population(prob, 1)
    # pop = pg.population(prob, 0)
    # pop.push_back([1000, *l0])

    # optimise
    pop = algo.evolve(pop)
def test_trajectory(traj):
    ntrailers = traj['ntrailers']
    t = traj['t']
    x = traj['x']
    y = traj['y']
    phi = traj['phi']
    theta = np.array(traj['theta'])
    u1 = traj['u1']
    u2 = traj['u2']

    fu = make_interp_spline(t, np.array([u1, u2]).T, k=3)
    dynamics = Dynamics(ntrailers)

    def rhs(t, state):
        u = fu(t)
        r = dynamics.rhs(*state, *u)
        r = np.reshape(r, (-1,))
        return r

    state0 = [x[0], y[0], phi[0]] + list(theta[:, 0])
    ans = solve_ivp(rhs, [t[0], t[-1]], state0, t_eval=t, max_step=1e-3)

    plt.gca().set_prop_cycle(None)
    plt.plot(ans.y[0], ans.y[1])
    plt.gca().set_prop_cycle(None)
    plt.plot(x, y, '--')
    plt.show()
def main():
    population = 6400         # Number of agents
    average_degree = 4        # Average degree of network
    num_episode = 1           # Number of episodes for taking ensemble average
    network_type = 'lattice'  # Topology of network

    # Creating new directory for data files
    new_dirname = ("dyn_time_evolution_n" + str(population) + "e"
                   + str(num_episode) + "_" + network_type)
    os.mkdir(new_dirname)
    os.chdir(new_dirname)

    simulation = Dynamics(population, average_degree, network_type)

    for episode in range(num_episode):
        random.seed()
        simulation.one_episode(episode)
def __init__(self, model, state, breakpoints=None):
    """
    Basic initialization for the class. Cf. the Dynamics class for an
    explanation of the input parameters.
    """
    # First take care of what is inherited:
    Dynamics.__init__(self, model, state, breakpoints)

    # The explicit solvers presently only work for stacks, lists
    # and 'd' arrays:
    assert self._mode == 'list', \
        "StiffDynamics only handles stacks, lists and 'd' arrays at the moment!"

    # History array for the gearX solvers:
    self.__prev = []  # For the gearX solvers
class CostFunctor:
    def __init__(self, use_penalty=False, return_states=False):
        self.dyn = Dynamics(dt=0.02)
        self.Q = np.array([10, 10, 100, 20.5, 20.5, 20, 9.8, 9.8, 10])
        self.Qf = self.Q
        self.x0 = np.array([0, 0, -5., 0, 0, 0, 0, 0, 0])
        self.x_des = self.x0.copy()
        self.use_penalty = use_penalty
        self.initialized = False
        self.return_states = return_states

        w_max = np.pi / 2
        self.u_max = np.array([1, w_max, w_max, w_max])
        self.u_min = np.array([0, -w_max, -w_max, -w_max])
        self.mu = 10.

    def __call__(self, z):
        if z.shape[0] == z.size:
            Tm = z.size
            pop_size = 1
        else:
            pop_size, Tm = z.shape  # default to use with nempc
        horizon = Tm // 4

        xk = np.tile(self.x0[:, None], pop_size)
        cost = np.zeros(pop_size)
        u_traj = z.reshape(pop_size, horizon, 4)
        if self.return_states:
            x_traj = []
        for k in range(horizon):
            xk = self.dyn.rk4(xk, u_traj[:, k].T)
            if self.return_states:
                x_traj.append(xk.copy())  # record propagated states (missing in the original)
            x_err = xk - self.x_des[:, None]
            if k < horizon - 1:
                cost += np.sum(x_err**2 * self.Q[:, None], axis=0)
            else:
                cost += np.sum(x_err**2 * self.Qf[:, None], axis=0)

        # penalty is only used to add cost for bound constraint violations
        # when testing with gradient-based optimizer
        penalty = 0
        if self.use_penalty:
            cost = cost.item()
            if not self.initialized:
                self.z_max = np.tile(self.u_max, horizon)
                self.z_min = np.tile(self.u_min, horizon)
                self.initialized = True

            upper_bound = z - self.z_max
            mask = upper_bound > 0
            penalty += np.sum(upper_bound[mask]**2)

            lower_bound = self.z_min - z
            mask = lower_bound > 0
            penalty += np.sum(lower_bound[mask]**2)

            penalty *= self.mu / 2

        if self.return_states:
            x_traj = np.array(x_traj).flatten()
            return x_traj, cost + penalty
        return cost + penalty
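# Hedged usage sketch for CostFunctor: the functor evaluates either one flat
# control sequence or a population of candidate sequences (as NEMPC does).
# The horizon of 10 is an illustrative assumption, and Dynamics.rk4 comes from
# the project's own dynamics module, so this only runs in that context.
import numpy as np

cost_fn = CostFunctor(use_penalty=False)
horizon = 10

# Single flat sequence: horizon blocks of [throttle, wx, wy, wz].
z = np.random.uniform(cost_fn.u_min, cost_fn.u_max, size=(horizon, 4)).ravel()
print(cost_fn(z))          # cost of one candidate, shape (1,)

# Population of candidates, one row per member.
Z = np.random.uniform(cost_fn.u_min, cost_fn.u_max,
                      size=(500, horizon, 4)).reshape(500, -1)
print(cost_fn(Z).shape)    # (500,)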
def update(self, key):
    # key = a tuple
    # 1 = positive movement, -1 = negative movement, 0 = no movement
    y_move = key[0]
    x_move = key[1]
    x1, y1 = self.position()
    new_x_cen = x1 + (self.maxstep * x_move)
    new_y_cen = y1 + (self.maxstep * y_move)
    new_pos = Dynamics(new_x_cen, new_y_cen, x1, y1, self.dimensions, self.rect)
    new_x, new_y = (new_x_cen - (self.rect.width / 2.0),
                    new_y_cen - (self.rect.height / 2.0))
    self.rect.topleft = new_pos.sphere_move(new_x, new_y)
def get_hmc_samples(x_dim, eps, energy_function, sess, T=10, steps=200,
                    samples=None):
    hmc_dynamics = Dynamics(x_dim, energy_function, T=T, eps=eps, hmc=True)
    hmc_x = tf.placeholder(tf.float32, shape=(None, x_dim))
    Lx, _, px, hmc_MH = propose(hmc_x, hmc_dynamics, do_mh_step=True)

    if samples is None:
        samples = gaussian.get_samples(n=200)

    final_samples = []
    for t in range(steps):
        final_samples.append(np.copy(samples))
        Lx_, px_, samples = sess.run([Lx, px, hmc_MH[0]], {hmc_x: samples})

    return np.array(final_samples)
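# Hedged usage sketch for get_hmc_samples: Dynamics, propose and gaussian come
# from the snippet's own (L2HMC-style, TF1) modules, so this is only a sketch;
# the standard-normal energy and the shapes below are assumptions.
import numpy as np
import tensorflow as tf

def standard_normal_energy(x):
    # U(x) = 0.5 * ||x||^2, i.e. an isotropic Gaussian target (illustrative)
    return 0.5 * tf.reduce_sum(tf.square(x), axis=1)

with tf.Session() as sess:
    init_samples = np.random.randn(200, 2).astype(np.float32)
    chains = get_hmc_samples(x_dim=2, eps=0.1,
                             energy_function=standard_normal_energy,
                             sess=sess, T=10, steps=50, samples=init_samples)
    print(chains.shape)  # expected (steps, n_chains, x_dim) -> (50, 200, 2)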
def __init__(self, make_env, num_timesteps, envs_per_process):
    self.make_env = make_env
    self.envs_per_process = envs_per_process
    self.num_timesteps = num_timesteps
    self._set_env_vars()

    self.policy = CnnPolicy(scope='cnn_pol',
                            ob_space=self.ob_space,
                            ac_space=self.ac_space,
                            hidsize=512,
                            feat_dim=512,
                            ob_mean=self.ob_mean,
                            ob_std=self.ob_std,
                            layernormalize=False,
                            nl=tf.nn.leaky_relu)

    self.feature_extractor = InverseDynamics(policy=self.policy,
                                             feat_dim=512,
                                             layernormalize=0)
    self.dynamics = Dynamics(auxiliary_task=self.feature_extractor,
                             mode=MODE,
                             feat_dim=512)
    self.agent = PpoOptimizer(
        scope='ppo',
        ob_space=self.ob_space,
        ac_space=self.ac_space,
        policy=self.policy,
        use_news=0,
        gamma=.99,
        lam=.98,  # TODO: Change this for potentially vastly different results
        nepochs=3,
        nminibatches=16,
        lr=1e-4,
        cliprange=.1,  # TODO: Change this as well
        nsteps_per_seg=256,
        nsegs_per_env=1,
        ent_coeff=.001,
        normrew=1,
        normadv=1,
        ext_coeff=0.,
        int_coeff=1.,
        dynamics=self.dynamics)

    self.agent.to_report['aux'] = tf.reduce_mean(self.feature_extractor.loss)
    self.agent.total_loss += self.agent.to_report['aux']
    self.agent.to_report['dyn_loss'] = tf.reduce_mean(self.dynamics.loss)
    self.agent.total_loss += self.agent.to_report['dyn_loss']
    self.agent.to_report['feat_var'] = tf.reduce_mean(
        tf.nn.moments(self.feature_extractor.features, [0, 1])[1])
def __init__(self, x0, xf):
    Dynamics.__init__(self)
    self.x0 = np.array(x0, float)
    self.xf = np.array(xf, float)
def calculation(self):
    start = self.start
    goal = self.goal
    theta, theta_future, displacement_rear, displacement_rear_future, steering_step = \
        Dynamics(self.wheelbase, search_length=2.5, speed=5, dt=1)
    # bz = Bezier(start, goal, self.start_heading, self.goal_heading, self.start_steering,
    #             self.mapsize, self.car_length, self.car_width, self.wheelbase,
    #             self.mapsize * 3, "NoFound")
    # bezier_spline = bz.calculation()

    x = start[0]
    y = start[1]
    x_future = x
    y_future = y
    x_prev = x
    y_prev = y
    heading_state = self.start_heading
    rotate_angle = heading_state
    steering_state = self.start_steering
    found = 0
    cost_list = [[0, [x, y], heading_state, steering_state]]
    next_state = cost_list
    path_discrete = list([])  # Initialize discrete path
    global path
    global path_tree
    path = []                 # Initialize continuous path
    tree_leaf = [[x, y]]      # Initialize search tree leaf (search failed)
    search = 1
    step = 1
    path_tree = []            # Initialize continuous path for search trees

    while found != 1:
        if search >= self.freeGrid_num:
            break
        cost_list.sort(key=lambda x: x[0])
        next_state = cost_list.pop(0)
        path_discrete.append(np.round(next_state[1]))
        path.append(next_state[1])
        [x, y] = next_state[1]
        [x_future, y_future] = [x, y]
        heading_state = next_state[2]
        steering_state = next_state[3]
        if step > 1:
            [x_prev, y_prev] = path[step - 1]
        step += 1
        rotate_angle = heading_state

        if sqrt(np.dot(np.subtract([x, y], goal),
                       np.subtract([x, y], goal))) <= self.tolerance:
            found = 1

        rotate_matrix = [[np.cos(rotate_angle), -np.sin(rotate_angle)],
                         [np.sin(rotate_angle), np.cos(rotate_angle)]]
        action = (np.dot(displacement_rear, rotate_matrix)).tolist()
        action_future = np.dot(displacement_rear_future, rotate_matrix)
        candidates = np.add([x, y], action)
        candidates_future = np.add([x_future, y_future], action_future)
        candidates_round = np.round(candidates).astype(int)
        heading_state = np.add(heading_state, theta)

        invalid_ID = [
            ((candidates_round[i] == path).all(1).any()
             | (candidates_round[i] == self.danger_zone).all(1).any()
             | (candidates_round[i] == tree_leaf).all(1).any())
            for i in range(len(candidates_round))
        ]
        remove_ID = np.unique(
            np.where((candidates < 0) | (candidates > self.mapsize))[0])
        candidates = np.delete(candidates, remove_ID, axis=0)
        candidates_future = np.delete(candidates_future, remove_ID, axis=0)
        heading_state = np.delete(heading_state, remove_ID, axis=0)
        candidates = np.delete(candidates, np.where(invalid_ID), axis=0)
        candidates_future = np.delete(candidates_future, np.where(invalid_ID),
                                      axis=0)
        heading_state = np.delete(heading_state, np.where(invalid_ID), axis=0)

        if len(candidates) > 0:
            cost_list = []
            for i in range(len(candidates)):
                diff = np.square(candidates[i] - self.bezier_spline)
                min_dis = min(np.sqrt(np.sum(diff, axis=1)))
                diff_future = np.square(candidates_future[i] - self.bezier_spline)
                min_dis_future = min(np.sqrt(np.sum(diff_future, axis=1)))
                total_cost = min_dis + min_dis_future
                cost_list.append([
                    total_cost, candidates[i], heading_state[i],
                    steering_step[i]
                ])
        else:
            search += 1
            if (next_state[1] == tree_leaf).all(1).any():
                tree_leaf.append(np.round([x_prev, y_prev]))
            else:
                tree_leaf.append((np.round([x, y])).tolist())
            x = start[0]
            y = start[1]
            x_future = x
            y_future = y
            x_prev = x
            y_prev = y
            heading_state = self.start_heading
            rotate_angle = heading_state
            steering_state = self.start_steering
            found = 0
            cost_list = [[0, [x, y], heading_state, steering_state]]
            next_state = cost_list
            path_discrete = list([])  # Initialize discrete path
            path_tree.append(path)
            path = []                 # Initialize continuous path
            step = 1

    bz_last = Bezier(next_state[1], goal, next_state[2], self.goal_heading,
                     self.start_steering, self.mapsize, self.car_length,
                     self.car_width, self.wheelbase, self.tolerance * 3,
                     "Found")
    bz_last = bz_last.calculation()
    path.append(bz_last)
    return path, path_tree
from IPython.core.debugger import set_trace
from importlib import reload
# import sys
# sys.path.append("../")
import numpy as np  # needed below for np.cos / np.random.normal
import params as P
from dynamics import Dynamics
from animation import Animation
from plotData import plotData
from ekf_slam import EKF_SLAM
from copy import deepcopy

dynamics = Dynamics()
dataPlot = plotData()
ekf_slam = EKF_SLAM()
animation = Animation(ekf_slam)

estimate = np.array([])
actual = np.array([])

t = P.t_start
while t < P.t_end:
    t_next_plot = t + P.t_plot
    while t < t_next_plot:
        t = t + P.Ts
        vc = 1.25 + 0.5 * np.cos(2 * np.pi * (0.2) * t)
        omegac = -0.5 + 0.2 * np.cos(2 * np.pi * (0.6) * t)
        noise_v = vc + np.random.normal(
# arguments
n = 10
n_unstable = 3
n_r = 8

# Generate Dynamics Matrices
while True:
    A = np.random.randn(n, n)
    lmbda = npl.eig(A)[0]
    # eigenvalues of a random real matrix are complex; count unstable
    # (right-half-plane) ones by their real parts
    if sum(lmbda.real >= 0) == n_unstable:
        break
B = np.vstack([[1, 0], np.zeros([n - 2, 2]), [0, 1]])
dyn_f = Dynamics(A, B)
dyn_r = reduceDynamics(dyn_f, n_r, debug=0)

# Setup Simulation parameters
x0 = np.zeros(n)
x0[-1] = 5
tf = 2
dt = 0.01
t_arr = np.arange(0, tf, dt)

# simulate full dynamics
x_arr = np.zeros([n, len(t_arr)])
x = x0
for i, t in enumerate(t_arr):
    dx = dyn_f.A @ x
def sample_dist(state, actions):
    global hyperparameters
    samples = []
    dynamics = Dynamics()
    # dynamics.fit(state, actions)
    for traj_no in range(state.shape[0]):  # trajectory = 6
        dynamics = Dynamics()
        traj_states = state[traj_no, :, :]
        traj_actions = actions[traj_no, :, :]
        vals, acts = getPreviousSA(traj_no, traj_states, traj_actions)
        T, dx = traj_states.shape
        du = traj_actions.shape[1]
        hyperparameters = {
            'wx': [1 / float(dx) for i in range(dx)],
            'wu': [1 / float(du) for i in range(du)]
        }
        eta = 1e-16
        dynamics.fit(vals, acts)
        prev_traj_dist = init_traj_dist(traj_states, traj_actions, dynamics,
                                        hyperparameters)
        traj_dist = prev_traj_dist
        prev_mu, prev_sigma = forward(prev_traj_dist, dynamics)
        prev_eta = -np.Inf
        min_eta = prev_eta
        _MAX_ITER = 5
        for iter in range(_MAX_ITER):
            # Collect samples in simulation
            for sample in range(10):
                s, a = get_sample(traj_dist, traj_no)
                push_sample(traj_no, s, a)
            vals, acts = getPreviousSA(traj_no, traj_states, traj_actions)
            dynamics.fit(vals, acts, .01)
            traj_dist, new_eta = backward(traj_states, traj_actions, dynamics,
                                          eta, hyperparameters)
            print(new_eta)
            print('try again')
            mu, sigma = forward(traj_dist, dynamics)
            if new_eta > prev_eta:
                min_eta = new_eta
            # dynamics.fit(new_mu, new_sigma)
            # # TODO: calculate KL divergence between new traj_dist and prev_traj_dist
            # # check constraint, that kl_div <= _THRESHOLD
            # kl_div = calculate_KL_div(mu, prev_mu, traj_dist, prev_traj_dist)
            # print(kl_div)
            # if kl_div <= _THRESHOLD:
            #     break
            prev_traj_dist = traj_dist

        # Take initial sample
        samples = np.array([
            -np.random.multivariate_normal(mu[t], sigma[t], 1).flatten()
            for t in range(T)
        ])
        commands = samples[:, 28:]
        # raw_input()
        f = open('trajectories/target/Traj{}pred.txt'.format(traj_no + 1), 'w')
        print('here')
        for act in commands:
            f.write("{}\n".format(" ".join(str(x) for x in act.flatten())))
        f.close()
    return samples
import numpy as np
import params
from dynamics import Dynamics
from quadrotor_viewer import QuadRotor_Viewer
from scipy.spatial.transform import Rotation
from mpc import MPC, NMPC, LNMPC
from data_viewer import data_viewer
import time

if __name__ == "__main__":
    dynamics = Dynamics(params.dt)
    # viewer = QuadRotor_Viewer()
    data_view = data_viewer()

    A, B = dynamics.get_SS(dynamics.state)
    # LNMPC doesn't seem to work any better
    # controller = MPC(A, B, params.u_max, params.u_min, T=params.T)  # Typical MPC
    controller = LNMPC(
        A, B, params.u_max, params.u_min, T=params.T
    )  # Non-linear model predictive control, relinearizing about the current state
    # controller = NMPC(params.nu_max, params.nu_min, T=params.T)  # Way too slow

    t0 = params.t0
    F_eq = params.mass * 9.81
    T_eq = 0.0
    u_eq = np.array([F_eq, T_eq, T_eq, T_eq])
    xr = np.array([
        5.0, -5.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        np.deg2rad(0), 0.0, 0.0, 0.0
    ])
exec('from ' + sys.argv[1][:-3] + ' import HP, PM')
args = HP()
pm = PM()
print(vars_stat(pm))

data = Dataset(args.dtFilename, args.batchsize)
data_W = sum(1 for line in open(args.dtVocname) if line.rstrip())
model = LDA(data.n_tr, data_W, args.K, args.alpha, args.beta, args.sigma,
            args.n_gsamp)
model.set_holdout_logperp(args.perpType, data.ho_train_cts, data.ho_test_cts,
                          args.n_window)

theta = args.beta + args.sigma * np.random.normal(size=(args.M, args.K, data_W))
theta_tf = tf.Variable(theta)
grads_tf = tf.placeholder(dtype=theta.dtype, shape=theta.shape)
op_samples, dninfo = Dynamics(pm.dnType, pm).evolve(theta_tf,
                                                    L_grad_logp=grads_tf)

tr_times = []
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    theta_smp, theta_par = zip(
        *sess.run([dninfo.L_samples, dninfo.L_particles]))[0]
    for i in range(args.n_round):
        t_start = time.time()
        for j in range(args.n_iter):
            tr_train_cts, tr_test_cts = data.get_batch()
            grads = model.get_grad_logp(tr_train_cts, theta=theta_par)
            if j == args.n_iter - 1:
                break
            theta_par = sess.run([op_samples, dninfo.L_particles],
                                 {grads_tf: grads})[1][0]
        theta_smp, theta_par = zip(
import sys
sys.path.append('..')
from cost import CostFunctor
from nempc import NEMPC
from dynamics import Dynamics
import numpy as np
import matplotlib.pyplot as plt

cost_fn = CostFunctor(use_penalty=False)
cost_fn.Q = np.array([10, 10, 100, 2.5, 2.5, 2, 1, 1, 1])
u_eq = np.array([0.5, 0, 0, 0])
ctrl = NEMPC(cost_fn, 9, 4, cost_fn.u_min, cost_fn.u_max, u_eq,
             horizon=10, population_size=500, num_parents=10, num_gens=200,
             mode='tournament', warm_start=True)

dyn = Dynamics()

t = 0.0
tf = 10.0
ts = 0.02
x = np.array([0, 0, -5.0, 0, 0, 0, 0, 0, 0])
x_des = np.array([0, 1, -6.0, 0, 0, 0, 0, 0, 0])

state_hist = []
input_hist = []
time_hist = []
while t < tf:
    print(t, end='\r')
    time_hist.append(t)
    state_hist.append(x)
class SFM(object):
    # pitch registers
    transpositionSemitones = 0
    octaveNumber = 3

    # tempo registers
    tempo = 60
    currentBeatValue = 60.0/tempo
    currentBeat = 0
    totalDuration = 0.0

    # dynamics registers
    crescendoSpeed = 1.1
    currentCrescendoSpeed = crescendoSpeed
    crescendoBeatsRemaining = 0.0
    maximumAmplitude = 0.0

    # tuple registers
    duration = 60.0/tempo
    amplitude = 1.0
    decay = 1.0

    # I/O
    input = ""

    #########################################################
    # initialize Note, Rhythm, and Dynamics objects
    def __init__(self, SCALE):
        NOTES, FREQ_DICT = SCALE
        self.note = Note(NOTES, FREQ_DICT)
        self.rhythm = Rhythm()
        self.dynamics = Dynamics()
        self.equalizerFactor = {}
        self.equalizerFactor[-0.1] = 1.0
        self.equalizerFactor[220] = 1.0
        self.equalizerFactor[440] = 0.7
        self.equalizerFactor[880] = 0.35
        self.equalizerFactor[1760] = 0.15
        self.equalizerFactor[3520] = 0.15
        self.equalizerFactor[7040] = 0.15
        self.equalizerFactor[14080] = 0.15
        self.equalizerFactor[28160] = 0.15
        self.equalizerFactor[56320] = 0.15
        self.equalizerBreakPoints = [-0.1, 220, 440, 880, 1760, 3520, 7040,
                                     14080, 28160, 56320]
        # self.tempo = 60
        # self.currentBeatValue = 60.0/self.tempo
        # self.octaveNumber = 3

    def equalize(self, freq):
        a, b = interval(freq, self.equalizerBreakPoints)
        f = mapInterval(freq, a, b, self.equalizerFactor[a],
                        self.equalizerFactor[b])
        # print freq, a, b, f
        return f

    # return tuple as string given frequency,
    # duration, decay, and amplitude
    def _tuple(self, freq, duration):
        output = `freq`
        output += " "+`duration`
        a = self.amplitude
        a = a*self.equalize(freq)
        output += " "+`a`
        output += " "+`self.decay`
        return output

    # return tuple as string from frequency of token
    # and root and suffix of token
    def tuple(self, freq, root, suffix):
        if suffix.find(",") > -1:
            thisDuration = self.duration*(1 - self.rhythm.breath)
            output = self._tuple(freq, thisDuration)
            output += "\n"
            output += self._tuple(0, self.duration - thisDuration)
        else:
            output = self._tuple(freq, self.duration)
        return output

    def updateRhythm(self, cmd):
        self.currentBeatValue, self.duration = self.rhythm.value(cmd, self)

    def emitNote(self, token):
        if self.crescendoBeatsRemaining > 0:
            self.amplitude = self.amplitude*self.currentCrescendoSpeed
            self.crescendoBeatsRemaining -= self.currentBeatValue
        freq, root, suffix = self.note.freq(token, self.transpositionSemitones,
                                            self.octaveNumber)
        self.output += self.tuple(freq, root, suffix) + "\n"
        # summary data
        self.totalDuration += self.duration
        self.currentBeat += self.currentBeatValue
        if self.amplitude > self.maximumAmplitude:
            self.maximumAmplitude = self.amplitude

    def executeCommand(self, ops):
        cmd = ops[0]
        # if cmd is a rhythm symbol, change value of duration register
        if self.rhythm.isRhythmOp(cmd):
            self.updateRhythm(cmd)
        # if cmd is a tempo command, change value of the tempo register
        if self.rhythm.isTempoOp(cmd):
            self.tempo = self.rhythm.tempo[cmd]
            self.updateRhythm(cmd)
        if cmd == "tempo":
            self.tempo = float(ops[1])
            self.updateRhythm(cmd)
        # if cmd is an articulation command, change value of the decay register
        if self.rhythm.isArticulationOp(cmd):
            self.decay = self.rhythm.decay[cmd]
        # if cmd is a dynamics command, change value of the amplitude register
        if self.dynamics.isDynamicsConstant(cmd):
            self.amplitude = self.dynamics.value[cmd]
        # crescendo and decrescendo
        if cmd == "crescendo" or cmd == "cresc":
            self.crescendoBeatsRemaining = float(ops[1])
            self.currentCrescendoSpeed = self.crescendoSpeed
        if cmd == "decrescendo" or cmd == "decresc":
            self.crescendoBeatsRemaining = float(ops[1])
            self.currentCrescendoSpeed = 1.0/self.crescendoSpeed
        # pitch transposition
        if cmd == "octave":
            self.octaveNumber = int(ops[1])
        if cmd == "transpose":
            self.transpositionSemitones = int(ops[1])
        # pass special commands through
        if cmd[0] == '@':
            CMD = catList2(ops)
            CMD = CMD[:len(CMD)]+"\n"
            self.output += CMD

    # tuples: returns a string of tuples from input = solfa text
    def tuples(self):
        # split input into list of tokens
        self.input = self.input.replace("\n", " ")
        tokens = self.input.split(" ")
        # make sure there are no empty list elements
        tokens = filter(lambda x: len(x), tokens)
        # initialize output
        self.output = ""
        for token in tokens:
            if self.note.isNote(token):
                self.emitNote(token)
            else:
                ops = token.split(":")
                ops = filter(lambda x: len(x) > 0, ops)
                if DEBUG == ON:
                    self.output += "cmd: "+`ops`+"\n"
                self.executeCommand(ops)
        return self.output
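# Hedged usage sketch for SFM (note this class is Python 2 code: backtick repr,
# list-returning filter, print statement). SCALE is assumed to be the
# (NOTES, FREQ_DICT) pair provided by the surrounding project, and the solfa
# tokens below are purely illustrative.
sfm = SFM(SCALE)
sfm.input = "do re mi"   # illustrative solfa text
print sfm.tuples()       # one "freq duration amplitude decay" tuple per note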
class Optimization:
    def __init__(self, config):
        self.physical_params = config["physical_params"]
        self.T = config["T"]
        self.dt = config["dt"]
        self.initial_state = config["xinit"]
        self.goal_state = config["xgoal"]
        self.ellipse_arc = config["ellipse_arc"]
        self.deviation_cost = config["deviation_cost"]
        self.Qf = config["Qf"]
        self.limits = config["limits"]
        self.n_state = 6
        self.n_nominal_forces = 4
        self.tire_model = config["tire_model"]
        self.initial_guess_config = config["initial_guess_config"]
        self.puddle_model = config["puddle_model"]
        self.force_constraint = config["force_constraint"]
        self.visualize_initial_guess = config["visualize_initial_guess"]
        self.dynamics = Dynamics(self.physical_params.lf,
                                 self.physical_params.lr,
                                 self.physical_params.m,
                                 self.physical_params.Iz, self.dt)

    def build_program(self):
        self.prog = MathematicalProgram()

        # Declare variables.
        state = self.prog.NewContinuousVariables(rows=self.T + 1,
                                                 cols=self.n_state,
                                                 name='state')
        nominal_forces = self.prog.NewContinuousVariables(
            rows=self.T, cols=self.n_nominal_forces, name='nominal_forces')
        steers = self.prog.NewContinuousVariables(rows=self.T, name="steers")
        slip_ratios = self.prog.NewContinuousVariables(rows=self.T, cols=2,
                                                       name="slip_ratios")
        self.state = state
        self.nominal_forces = nominal_forces
        self.steers = steers
        self.slip_ratios = slip_ratios

        # Set the initial state.
        xinit = pack_state_vector(self.initial_state)
        for i, s in enumerate(xinit):
            self.prog.AddConstraint(state[0, i] == s)

        # Constrain xdot to always be at least some value to prevent
        # numerical issues with the optimizer.
        for i in range(self.T + 1):
            s = unpack_state_vector(state[i])
            self.prog.AddConstraint(s["xdot"] >= self.limits.min_xdot)

            # Constrain slip ratio to be at least a certain magnitude.
            if i != self.T:
                self.prog.AddConstraint(
                    slip_ratios[i, 0]**2.0 >= self.limits.min_slip_ratio_mag**2.0)
                self.prog.AddConstraint(
                    slip_ratios[i, 1]**2.0 >= self.limits.min_slip_ratio_mag**2.0)

        # Control rate limits.
        for i in range(self.T - 1):
            ddelta = self.dt * (steers[i + 1] - steers[i])
            f_dkappa = self.dt * (slip_ratios[i + 1, 0] - slip_ratios[i, 0])
            r_dkappa = self.dt * (slip_ratios[i + 1, 1] - slip_ratios[i, 1])
            self.prog.AddConstraint(ddelta <= self.limits.max_ddelta)
            self.prog.AddConstraint(ddelta >= -self.limits.max_ddelta)
            self.prog.AddConstraint(f_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(f_dkappa >= -self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa <= self.limits.max_dkappa)
            self.prog.AddConstraint(r_dkappa >= -self.limits.max_dkappa)

        # Control value limits.
        for i in range(self.T):
            self.prog.AddConstraint(steers[i] <= self.limits.max_delta)
            self.prog.AddConstraint(steers[i] >= -self.limits.max_delta)

        # Add dynamics constraints by constraining residuals to be zero.
        for i in range(self.T):
            residuals = self.dynamics.nominal_dynamics(state[i], state[i + 1],
                                                       nominal_forces[i],
                                                       steers[i])
            for r in residuals:
                self.prog.AddConstraint(r == 0.0)

        # Add the cost function.
        self.add_cost(state)

        # Add a different force constraint depending on the configuration.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            self.add_no_puddle_force_constraints(
                state, nominal_forces, steers,
                self.tire_model.get_deterministic_model(), slip_ratios)
        elif self.force_constraint == ForceConstraint.MEAN_CONSTRAINED:
            self.add_mean_constrained()
        elif self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED:
            self.add_chance_constraints()
        else:
            raise NotImplementedError("ForceConstraint type invalid.")
        return

    def constant_input_initial_guess(self, state, steers, slip_ratios,
                                     nominal_forces):
        # Guess the slip ratio as the minimum allowed value.
        gslip_ratios = np.tile(
            np.array([self.limits.min_slip_ratio_mag,
                      self.limits.min_slip_ratio_mag]), (self.T, 1))

        # Guess the slip angle as a linearly ramping steer that then becomes
        # constant. This is done by creating an array of values corresponding
        # to end_delta then filling in the initial ramp up phase.
        gsteers = self.initial_guess_config["end_delta"] * np.ones(self.T)
        igc = self.initial_guess_config
        for i in range(igc["ramp_steps"]):
            gsteers[i] = (i / igc["ramp_steps"]) * (igc["end_delta"]
                                                    - igc["start_delta"])

        # Create empty arrays for state and forces.
        gstate = np.empty((self.T + 1, self.n_state))
        gstate[0] = pack_state_vector(self.initial_state)
        gforces = np.empty((self.T, 4))
        all_slips = self.T * [None]

        # Simulate the dynamics for the initial guess differently depending
        # on the force constraint being used.
        if self.force_constraint == ForceConstraint.NO_PUDDLE:
            tire_model = self.tire_model.get_deterministic_model()
            for i in range(self.T):
                s = unpack_state_vector(gstate[i])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        elif (self.force_constraint == ForceConstraint.MEAN_CONSTRAINED
              or self.force_constraint == ForceConstraint.CHANCE_CONSTRAINED):
            # mean model is a function that maps
            # (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
            mean_model = self.tire_model.get_mean_model(
                self.puddle_model.get_mean_fun())
            for i in range(self.T):
                # Update the tire model based off the conditions of the world
                # at (x, y)
                s = unpack_state_vector(gstate[i])
                tire_model = lambda slip_ratio, slip_angle: mean_model(
                    slip_ratio, slip_angle, s["X"], s["Y"])

                # Simulate the dynamics for one time step.
                gstate[i + 1], forces, slips = self.dynamics.simulate(
                    gstate[i], gsteers[i], gslip_ratios[i, 0],
                    gslip_ratios[i, 1], tire_model, self.dt)

                # Store the results.
                gforces[i] = pack_force_vector(forces)
                all_slips[i] = slips
        else:
            raise NotImplementedError(
                "Invalid value for self.force_constraint")

        # Declare array for the initial guess and set the values.
        initial_guess = np.empty(self.prog.num_vars())
        self.prog.SetDecisionVariableValueInVector(state, gstate, initial_guess)
        self.prog.SetDecisionVariableValueInVector(steers, gsteers,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(slip_ratios, gslip_ratios,
                                                   initial_guess)
        self.prog.SetDecisionVariableValueInVector(nominal_forces, gforces,
                                                   initial_guess)

        if self.visualize_initial_guess:
            # TODO: reorganize visualizations
            psis = gstate[:, 2]
            xs = gstate[:, 4]
            ys = gstate[:, 5]
            fig, ax = plt.subplots()
            if self.force_constraint != ForceConstraint.NO_PUDDLE:
                plot_puddles(ax, self.puddle_model)
            plot_ellipse_arc(ax, self.ellipse_arc)
            plot_planned_trajectory(ax, xs, ys, psis, gsteers,
                                    self.physical_params)

            # Plot the slip ratios/angles
            plot_slips(all_slips)
            plot_forces(gforces)
            if self.force_constraint == ForceConstraint.NO_PUDDLE:
                generate_animation(xs, ys, psis, gsteers, self.physical_params,
                                   self.dt, puddle_model=None)
            else:
                generate_animation(xs, ys, psis, gsteers, self.physical_params,
                                   self.dt, puddle_model=self.puddle_model)
        return initial_guess

    def solve_program(self):
        # Generate initial guess
        initial_guess = self.constant_input_initial_guess(
            self.state, self.steers, self.slip_ratios, self.nominal_forces)

        # Solve the problem.
        solver = SnoptSolver()
        result = solver.Solve(self.prog, initial_guess)
        solver_details = result.get_solver_details()
        print("Exit flag: " + str(solver_details.info))

        self.visualize_results(result)

    def visualize_results(self, result):
        state_res = result.GetSolution(self.state)
        psis = state_res[:, 2]
        xs = state_res[:, 4]
        ys = state_res[:, 5]
        steers = result.GetSolution(self.steers)
        fig, ax = plt.subplots()
        plot_ellipse_arc(ax, self.ellipse_arc)
        plot_puddles(ax, self.puddle_model)
        plot_planned_trajectory(ax, xs, ys, psis, steers, self.physical_params)
        generate_animation(xs, ys, psis, steers, self.physical_params, self.dt,
                           puddle_model=self.puddle_model)
        plt.show()

    def add_cost(self, state):
        # Add the final state cost function.
        diff_state = pack_state_vector(self.goal_state) - state[-1]
        self.prog.AddQuadraticCost(diff_state.T @ self.Qf @ diff_state)

        # Get the approx distance function for the ellipse.
        fun = self.ellipse_arc.approx_dist_fun()
        for i in range(self.T):
            s = unpack_state_vector(state[i])
            self.prog.AddCost(self.deviation_cost * fun(s["X"], s["Y"]))

    def add_mean_constrained(self):
        # mean model is a function that maps
        # (slip_ratio, slip_angle, x, y) -> (E[Fx], E[Fy])
        mean_model = self.tire_model.get_mean_model(
            self.puddle_model.get_mean_fun())
        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(self.state[i])
            F = unpack_force_vector(self.nominal_forces[i])

            # get the tire model at this position in space.
            tire_model = lambda slip_ratio, slip_angle: mean_model(
                slip_ratio, slip_angle, s["X"], s["Y"])

            # Unpack values
            delta = self.steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(s["xdot"], s["ydot"],
                                                         s["psidot"], delta)
            kappa_f = self.slip_ratios[i, 0]
            kappa_r = self.slip_ratios[i, 1]

            # Compute expected forces.
            E_Ffx, E_Ffy = tire_model(kappa_f, alpha_f)
            E_Frx, E_Fry = tire_model(kappa_r, alpha_r)

            # Constrain these expected force values to be equal to the
            # nominal forces in the optimization.
            self.prog.AddConstraint(E_Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(E_Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(E_Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(E_Fry - F["r_lat"] == 0.0)

    def add_chance_constrained(self):
        pass

    def add_no_puddle_force_constraints(self, state, forces, steers,
                                        pacejka_model, slip_ratios):
        """
        Args:
            prog:
            state:
            forces:
            steers:
            pacejka_model: function with signature (slip_ratio, slip_angle)
                using pydrake.math
        """
        for i in range(self.T):
            # Get slip angles and slip ratios.
            s = unpack_state_vector(state[i])
            F = unpack_force_vector(forces[i])
            delta = steers[i]
            alpha_f, alpha_r = self.dynamics.slip_angles(s["xdot"], s["ydot"],
                                                         s["psidot"], delta)
            kappa_f = slip_ratios[i, 0]
            kappa_r = slip_ratios[i, 1]
            Ffx, Ffy = pacejka_model(kappa_f, alpha_f)
            Frx, Fry = pacejka_model(kappa_r, alpha_r)

            # Constrain the values from the pacejka model to be equal
            # to the desired values in the optimization.
            self.prog.AddConstraint(Ffx - F["f_long"] == 0.0)
            self.prog.AddConstraint(Ffy - F["f_lat"] == 0.0)
            self.prog.AddConstraint(Frx - F["r_long"] == 0.0)
            self.prog.AddConstraint(Fry - F["r_lat"] == 0.0)
import hb_msgs.msg
import numpy as np
from hb_common.datatypes import Params, Command, ROSParameterException
from hb_common.helpers import saturate
from dynamics import Dynamics

# this is just the dynamic parameters from the ROS param server
params = Params._from_rosparam()

# this is artificially setting all our states and inputs to 0.5
# to test our dynamics function
state = np.ones((6, 1)) * 0.5
command = Command(left=0.5, right=0.5)

# making a dynamics object
d = Dynamics()
deriv_of_state = d.dynamics(params, state, command)

print("\n\n\n\nThis is the derivative of our state!!!")
print(deriv_of_state)
print("This is the derivative of our state!!!\n\n\n\n")

## for this input:
# state = np.ones((6,1))*0.5
# command = Command(left=0.5, right=0.5)
class WrappedEnv(gym.Env):
    def __init__(self,
                 sess=None,
                 env_name=None,
                 feature_dim=None,
                 encoder_gamma=None,
                 encoder_hidden_size=None,
                 dynamics_hidden_size=None,
                 invdyn_hidden_size=None,
                 encoder_lr=None,
                 dynamics_lr=None,
                 invdyn_lr=None):
        super(WrappedEnv, self).__init__()
        self._sess = sess
        self._env = gym.make(env_name)
        self._state_dim = self._env.observation_space.shape[0]
        self._action_dim = self._env.action_space.shape[0]
        self._feature_dim = feature_dim
        self._encoder_gamma = encoder_gamma
        self._experience_buffer_size = 50000

        self.observation_space = spaces.Box(
            np.array([-np.inf] * self._feature_dim),
            np.array([np.inf] * self._feature_dim))
        self.action_space = self._env.action_space

        self._num_hidden_layers = 2
        self._encoder_hidden_sizes = [encoder_hidden_size] * self._num_hidden_layers
        self._dynamics_hidden_sizes = [dynamics_hidden_size] * self._num_hidden_layers
        self._invdyn_hidden_sizes = [invdyn_hidden_size] * self._num_hidden_layers

        self._encoder = Encoder(sess=self._sess,
                                input_dim=self._state_dim,
                                output_dim=feature_dim,
                                hidden_sizes=self._encoder_hidden_sizes,
                                learning_rate=encoder_lr)
        self._dynamics = Dynamics(sess=self._sess,
                                  state_dim=feature_dim,
                                  action_dim=self._action_dim,
                                  hidden_sizes=self._dynamics_hidden_sizes,
                                  learning_rate=dynamics_lr)
        self._inv_dynamics = InverseDynamics(
            sess=self._sess,
            state_dim=feature_dim,
            action_dim=self._action_dim,
            hidden_sizes=self._invdyn_hidden_sizes,
            learning_rate=invdyn_lr)

        self._state = self._env.reset()

    def step(self, action):
        next_state, reward, terminal, info = self._env.step(action)
        encoded_next_state = self._encoder.predict(state=next_state)

        batch_state = self._state
        batch_action = action
        batch_reward = reward
        batch_next_state = next_state
        batch_encoded_state = self._encoder.predict(state=batch_state)
        batch_encoded_next_state = self._encoder.predict(
            state=batch_next_state)

        self._dynamics.update(state=batch_encoded_state,
                              action=batch_action,
                              next_state=batch_encoded_next_state)
        self._inv_dynamics.update(state=batch_encoded_state,
                                  action=batch_action,
                                  next_state=batch_encoded_next_state)

        dyn_gradients = self._dynamics.calc_gradients(
            state=batch_encoded_state, action=action)
        invdyn_gradients = self._inv_dynamics.calc_gradients(
            state=batch_encoded_state, next_state=batch_encoded_next_state)
        encoder_gradients = self._encoder.calc_gradients(state=batch_state)

        dyn_state_gradients = dyn_gradients[:self._feature_dim]
        dyn_action_gradients = dyn_gradients[self._feature_dim:]
        invdyn_state_gradients = invdyn_gradients[:self._feature_dim]
        invdyn_nstate_gradients = invdyn_gradients[self._feature_dim:]

        encoder_optim_grads = np.dot(
            invdyn_state_gradients,
            invdyn_nstate_gradients) * dyn_state_gradients
        self._encoder.update(state=batch_state,
                             optim_grads=encoder_optim_grads)

        self._state = next_state
        return encoded_next_state, reward, terminal, info

    def reset(self):
        self._state = self._env.reset()
        encoded_state = self._encoder.predict(state=self._state)
        return encoded_state

    def render(self):
        self._env.render()
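# Hedged usage sketch for WrappedEnv: the environment name, network sizes and
# learning rates are illustrative assumptions, and Encoder / Dynamics /
# InverseDynamics come from the snippet's own modules (TF1 session API).
import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    env = WrappedEnv(sess=sess, env_name='Pendulum-v0',
                     feature_dim=8, encoder_gamma=0.99,
                     encoder_hidden_size=64, dynamics_hidden_size=64,
                     invdyn_hidden_size=64, encoder_lr=1e-3,
                     dynamics_lr=1e-3, invdyn_lr=1e-3)
    sess.run(tf.global_variables_initializer())

    obs = env.reset()  # encoded feature vector, shape (feature_dim,)
    for _ in range(5):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)  # also trains the models online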
two_link_robot_model_file = model_folder + 'two_link_robot_model.pkl'

# Create joint variables and define their relations
q0, q1, q2, q3, q4, q5, q6, q7, q8, q9 = new_sym('q:10')
# q3 = -q2 + q8
# q9 = -q8 + q2

robot_def = RobotDef([(0, -1, [1], 0, 0, 0, 0),
                      (1, 0, [2], 0, 0, -0.21537, q1),
                      (2, 1, [3], 0, -sympy.pi / 2, 0, q2 + sympy.pi / 2)],
                     dh_convention='mdh',
                     friction_type=['Coulomb', 'viscous', 'offset'])

geom = Geometry(robot_def)
dyn = Dynamics(robot_def, geom)

# if not os.path.exists(two_link_robot_model_file):
with open(two_link_robot_model_file, 'wb') as f:  # 'wr' is not a valid mode; pickle needs binary write
    pickle.dump(dyn.H_b, f)

H_b = None
if os.path.exists(two_link_robot_model_file):
    with open(two_link_robot_model_file, 'rb') as f:
        H_b = pickle.load(f)
print(H_b - dyn.H_b)

# import io, json
# with io.open('two_link_robot_model_file', 'w', encoding='utf-8') as f:
#     f.write(json.dumps(data, ensure_ascii=False))
reload(dynamics)
from dynamics import Dynamics
import animation
reload(animation)
from animation import Animation
# import plotData
# reload(plotData)
# from plotData import plotData
import mcl
reload(mcl)
from mcl import MCL

dynamics = Dynamics()
animation = Animation()
# dataPlot = plotData()
mcl = MCL()

states = dynamics.states()
mu = mcl.get_mu()
std = mcl.get_std()

t = P.t_start
while t < P.t_end:
    t_next_plot = t + P.t_plot
    while t < t_next_plot:
        t = t + P.Ts
        if t >= 1:
            mcl.change = True
# The Animation.py file is kept in the parent directory,
# so the parent directory path needs to be added.
sys.path.append('..')
from dynamics import Dynamics
from animation import Animation

t_start = 0.0    # Start time of simulation
t_end = 50.0     # End time of simulation
t_Ts = P.Ts      # Simulation time step
t_elapse = 0.1   # Simulation time elapsed between each iteration
t_pause = 0.01   # Pause between each iteration

user_input = Sliders()      # Instantiate Sliders class
simAnimation = Animation()  # Instantiate Animate class
dynam = Dynamics()          # Instantiate Dynamics class

t = t_start  # Declare time variable to keep track of simulation time elapsed
while t < t_end:
    plt.ion()                          # Make plots interactive
    plt.figure(user_input.fig.number)  # Switch current figure to user_input figure
    plt.pause(0.001)                   # Pause the simulation to detect user input

    # The dynamics of the model will be propagated in time by t_elapse
    # at intervals of t_Ts.
    t_temp = t + t_elapse
    while t < t_temp:
        dynam.propagateDynamics(  # Propagate the dynamics of the model in time
# so the parent directory path needs to be added.
sys.path.append('..')
from dynamics import Dynamics
from animation import Animation

t_start = 0.0    # Start time of simulation
t_end = 60.0     # End time of simulation
t_Ts = P.Ts      # Simulation time step
t_elapse = 0.01  # Simulation time elapsed between each iteration
t_pause = 0.01   # Pause between each iteration

sig_gen = Signals()           # Instantiate Signals class
plotGen = plotGenerator()     # Instantiate plotGenerator class
# simAnimation = Animation()  # Instantiate Animate class
ctrl = controllerSS()
dynam = Dynamics()            # Instantiate Dynamics class

t = t_start  # Declare time variable to keep track of simulation time elapsed
while t < t_end:
    ref_input = sig_gen.getRefInputs(t)

    # The dynamics of the model will be propagated in time by t_elapse
    # at intervals of t_Ts.
    t_temp = t + t_elapse
    while t < t_temp:
        states = dynam.States()                # Get current states
        u = ctrl.getForces(ref_input, states)  # Calculate the forces
        xhat = ctrl.getObsStates()             # Get xhat