def do_ppo(args, start_theta, parent_this_run_dir, full_space_save_dir):
    """Continue PPO training from a flat parameter vector (e.g. the best theta found by CMA)."""
    logger.log(f"#######CMA and then PPO TRAIN: {args}")

    this_conti_ppo_run_dir = get_ppo_part(parent_this_run_dir)
    log_dir = get_log_dir(this_conti_ppo_run_dir)
    conti_ppo_save_dir = get_save_dir(this_conti_ppo_run_dir)
    logger.configure(log_dir)

    full_param_traj_dir_path = get_full_params_dir(this_conti_ppo_run_dir)
    if os.path.exists(full_param_traj_dir_path):
        import shutil
        shutil.rmtree(full_param_traj_dir_path)
    os.makedirs(full_param_traj_dir_path)

    if os.path.exists(conti_ppo_save_dir):
        import shutil
        shutil.rmtree(conti_ppo_save_dir)
    os.makedirs(conti_ppo_save_dir)

    def make_env():
        env_out = gym.make(args.env)
        env_out.env.disableViewer = True
        env_out.env.visualize = False
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    if args.normalize:
        env = VecNormalize(env)

    model = PPO2.load(f"{full_space_save_dir}/ppo2")
    model.set_from_flat(start_theta)
    if args.normalize:
        env.load_running_average(full_space_save_dir)
    model.set_env(env)

    run_info = {"run_num": args.run_num,
                "env_id": args.env,
                "full_param_traj_dir_path": full_param_traj_dir_path}

    # model = PPO2(policy=policy, env=env, n_steps=args.n_steps, nminibatches=args.nminibatches, lam=0.95, gamma=0.99,
    #              noptepochs=10, ent_coef=0.0, learning_rate=3e-4, cliprange=0.2, optimizer=args.optimizer)
    model.tell_run_info(run_info)
    episode_returns = model.learn(total_timesteps=args.ppo_num_timesteps)

    model.save(f"{conti_ppo_save_dir}/ppo2")
    env.save_running_average(conti_ppo_save_dir)
    return episode_returns, full_param_traj_dir_path
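# Illustrative usage sketch (not a second entry point): do_ppo picks up training from a flat
# parameter vector found by CMA, reusing the saved full-space PPO2 checkpoint and its
# normalization statistics. The variable names below (best_theta, cma_intermediate_data_dir,
# save_dir) mirror how the CMA-then-PPO main() calls it.
#
# episode_returns, conti_dir = do_ppo(args=cma_args,
#                                     start_theta=best_theta,
#                                     parent_this_run_dir=cma_intermediate_data_dir,
#                                     full_space_save_dir=save_dir)
# np.savetxt(f"{cma_intermediate_data_dir}/ppo part returns.csv", episode_returns, delimiter=',')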
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    args, cma_unknown_args = common_arg_parser.parse_known_args()

    this_run_dir = get_dir_path_for_this_run(args)
    plot_dir_alg = get_plot_dir(args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir, params_scope="pi")
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)
    if not os.path.exists(plot_dir_alg):
        os.makedirs(plot_dir_alg)

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    def make_env():
        env_out = gym.make(args.env)
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    if args.normalize:
        env = VecNormalize(env)

    model = PPO2.load(f"{save_dir}/ppo2")  # this also loads the V function
    model.set_pi_from_flat(final_params)
    if args.normalize:
        env.load_running_average(save_dir)

    obz_tensor = model.act_model.fake_input_tensor
    some_neuron = model.act_model.policy_neurons[2][-1]

    # gradient ascent on the (fake) input: minimizing the negated activation
    # pushes the observation toward whatever most excites the chosen neuron
    grads = tf.gradients(tf.math.negative(some_neuron), obz_tensor)
    grads = list(zip(grads, obz_tensor))

    trainer = tf.train.AdamOptimizer(learning_rate=0.01, epsilon=1e-5)
    train_op = trainer.apply_gradients(grads)
    for i in range(10000):
        obz, _ = model.sess.run([obz_tensor, train_op])
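# Hedged follow-up sketch: after the ascent loop, `obz` holds the synthetic observation that
# approximately maximizes the chosen neuron's activation. Logging or persisting it is an
# assumption about a natural next step, not part of the original script:
#
# logger.log(f"activation-maximizing observation: {obz}")
# np.savetxt(f"{plot_dir_alg}/activation_maximizing_obs.csv",
#            np.asarray(obz).reshape(1, -1), delimiter=",")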
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    cma_args, cma_unknown_args = common_arg_parser.parse_known_args()

    run_nums = cma_args.run_nums_to_check
    run_nums = [int(run_num) for run_num in run_nums.split(":")]

    final_params_list = []
    start_params_list = []
    for run_num in run_nums:
        cma_args.run_num = run_num
        if os.path.exists(get_dir_path_for_this_run(cma_args)):
            this_run_dir = get_dir_path_for_this_run(cma_args)
            plot_dir_alg = get_plot_dir(cma_args)

            traj_params_dir_name = get_full_params_dir(this_run_dir)
            intermediate_data_dir = get_intermediate_data_dir(this_run_dir, params_scope="pi")
            save_dir = get_save_dir(this_run_dir)

            if not os.path.exists(intermediate_data_dir):
                os.makedirs(intermediate_data_dir)
            if not os.path.exists(plot_dir_alg):
                os.makedirs(plot_dir_alg)

            start_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_start")
            start_params = pd.read_csv(start_file, header=None).values[0]
            final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
            final_params = pd.read_csv(final_file, header=None).values[0]

            final_params_list.append(final_params)
            start_params_list.append(start_params)
        cma_args.run_num += 1

    # pairwise L2 distances between the final policy parameters of different runs
    final_params_distances = []
    for i in range(len(final_params_list)):
        for j in range(i + 1, len(final_params_list)):
            final_params_distances.append(LA.norm(final_params_list[i] - final_params_list[j], ord=2))

    plot_dir = get_plot_dir(cma_args)
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    np.savetxt(f"{plot_dir}/final_params_distances.txt", final_params_distances, delimiter=",")
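# Equivalent (illustrative) formulation of the pairwise-distance loop above using scipy;
# pdist returns the distances in the same i<j ordering. Assumes every run has an equally
# sized final parameter vector.
#
# from scipy.spatial.distance import pdist
# final_params_distances = pdist(np.vstack(final_params_list), metric="euclidean")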
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    cma_args, cma_unknown_args = common_arg_parser.parse_known_args()

    origin_name = "final_param"

    this_run_dir = get_dir_path_for_this_run(cma_args)
    plot_dir_alg = get_plot_dir(cma_args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir, params_scope="pi")
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)
    if not os.path.exists(plot_dir_alg):
        os.makedirs(plot_dir_alg)

    start_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_start")
    start_params = pd.read_csv(start_file, header=None).values[0]

    '''
    ==========================================================================================
    get the pc vectors
    ==========================================================================================
    '''
    pca_indexes = cma_args.other_pca_index
    pca_indexes = [int(pca_index) for pca_index in pca_indexes.split(":")]
    n_comp_to_project_on = pca_indexes

    result = do_pca(n_components=cma_args.n_components, traj_params_dir_name=traj_params_dir_name,
                    intermediate_data_dir=intermediate_data_dir, use_IPCA=cma_args.use_IPCA,
                    chunk_size=cma_args.chunk_size, reuse=True)
    logger.debug("after pca")

    if origin_name == "final_param":
        origin_param = result["final_params"]
    elif origin_name == "start_param":
        origin_param = start_params
    else:
        origin_param = result["mean_param"]

    proj_coords = project(result["pcs_components"], pcs_slice=n_comp_to_project_on,
                          origin_name=origin_name, origin_param=origin_param,
                          IPCA_chunk_size=cma_args.chunk_size,
                          traj_params_dir_name=traj_params_dir_name,
                          intermediate_data_dir=intermediate_data_dir,
                          n_components=cma_args.n_components, reuse=True)

    '''
    ==========================================================================================
    eval all xy coords
    ==========================================================================================
    '''
    other_pcs_plot_dir = get_other_pcs_plane_plot_dir(plot_dir_alg, pca_indexes)
    if not os.path.exists(other_pcs_plot_dir):
        os.makedirs(other_pcs_plot_dir)

    plot_3d_trajectory_path_only(other_pcs_plot_dir, f"{pca_indexes}_final_origin_3d_path_plot",
                                 proj_coords,
                                 explained_ratio=result["explained_variance_ratio"][pca_indexes])
def train(args):
    """Train a full-space PPO2 policy and record its parameter trajectory."""
    args, argv = mujoco_arg_parser().parse_known_args(args)
    logger.log(f"#######TRAIN: {args}")

    args.alg = "ppo2"
    this_run_dir = get_dir_path_for_this_run(args)
    if os.path.exists(this_run_dir):
        import shutil
        shutil.rmtree(this_run_dir)
    os.makedirs(this_run_dir)

    log_dir = get_log_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)
    logger.configure(log_dir)

    def make_env():
        env_out = gym.make(args.env)
        env_out.env.visualize = False
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    env.envs[0].env.env.disableViewer = True

    set_global_seeds(args.seed)
    env.envs[0].env.env.seed(args.seed)

    if args.normalize:
        env = VecNormalize(env)

    policy = MlpPolicy

    # extra run info I added for my purposes
    full_param_traj_dir_path = get_full_params_dir(this_run_dir)
    if os.path.exists(full_param_traj_dir_path):
        import shutil
        shutil.rmtree(full_param_traj_dir_path)
    os.makedirs(full_param_traj_dir_path)

    if os.path.exists(save_dir):
        import shutil
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)

    run_info = {"run_num": args.run_num,
                "env_id": args.env,
                "full_param_traj_dir_path": full_param_traj_dir_path,
                "state_samples_to_collect": args.state_samples_to_collect}

    model = PPO2(policy=policy, env=env, n_steps=args.n_steps, nminibatches=args.nminibatches,
                 lam=0.95, gamma=0.99, noptepochs=10, ent_coef=0.0, learning_rate=3e-4,
                 cliprange=0.2, optimizer=args.optimizer, seed=args.seed)
    model.tell_run_info(run_info)

    model.learn(total_timesteps=args.num_timesteps)

    model.save(f"{save_dir}/ppo2")
    if args.normalize:
        env.save_running_average(save_dir)
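# Illustrative launcher (a sketch; the repo's real entry point may differ). train() expects
# raw CLI-style arguments that mujoco_arg_parser understands:
#
# if __name__ == "__main__":
#     train(sys.argv[1:])   # e.g. --env=DartWalker2d-v1 --seed=0 ...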
def visualize_augment_experiment(augment_num_timesteps, top_num_to_include_slice, augment_seed,
                                 augment_run_num, network_size, policy_env, policy_num_timesteps,
                                 policy_run_num, policy_seed, eval_seed, eval_run_num, learning_rate,
                                 additional_note, result_dir, lagrangian_inds_to_include=None):
    args = AttributeDict()
    args.normalize = True
    args.num_timesteps = augment_num_timesteps
    args.run_num = augment_run_num
    args.alg = "ppo2"
    args.seed = augment_seed

    logger.log(f"#######TRAIN: {args}")

    # non_linear_global_dict
    timestamp = get_time_stamp('%Y_%m_%d_%H_%M_%S')
    experiment_label = f"learning_rate_{learning_rate}timestamp_{timestamp}_augment_num_timesteps{augment_num_timesteps}" \
                       f"_top_num_to_include{top_num_to_include_slice.start}_{top_num_to_include_slice.stop}" \
                       f"_augment_seed{augment_seed}_augment_run_num{augment_run_num}_network_size{network_size}" \
                       f"_policy_num_timesteps{policy_num_timesteps}_policy_run_num{policy_run_num}_policy_seed{policy_seed}" \
                       f"_eval_seed{eval_seed}_eval_run_num{eval_run_num}_additional_note_{additional_note}"

    if policy_env == "DartWalker2d-v1":
        entry_point = 'gym.envs.dart:DartWalker2dEnv_aug_input'
    elif policy_env == "DartHopper-v1":
        entry_point = 'gym.envs.dart:DartHopperEnv_aug_input'
    elif policy_env == "DartHalfCheetah-v1":
        entry_point = 'gym.envs.dart:DartHalfCheetahEnv_aug_input'
    elif policy_env == "DartSnake7Link-v1":
        entry_point = 'gym.envs.dart:DartSnake7LinkEnv_aug_input'
    else:
        raise NotImplementedError()

    this_run_dir = get_experiment_path_for_this_run(entry_point, args.num_timesteps, args.run_num,
                                                    args.seed, learning_rate=learning_rate,
                                                    top_num_to_include=top_num_to_include_slice,
                                                    result_dir=result_dir, network_size=network_size)
    full_param_traj_dir_path = get_full_params_dir(this_run_dir)
    log_dir = get_log_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    create_dir_remove(this_run_dir)
    create_dir_remove(full_param_traj_dir_path)
    create_dir_remove(save_dir)
    create_dir_remove(log_dir)
    logger.configure(log_dir)

    # note this is only linear
    if lagrangian_inds_to_include is None:
        linear_top_vars_list = read_linear_top_var(policy_env, policy_num_timesteps, policy_run_num,
                                                   policy_seed, eval_seed, eval_run_num, additional_note)

        # keys_to_include = ["COM", "M", "Coriolis", "total_contact_forces_contact_bodynode",
        #                    "com_jacobian", "contact_bodynode_jacobian"]
        keys_to_include = ["COM", "M", "Coriolis", "com_jacobian"]
        # lagrangian_inds_to_include = linear_top_vars_list[top_num_to_include_slice]
        lagrangian_inds_to_include = get_wanted_lagrangians(keys_to_include, linear_top_vars_list,
                                                            top_num_to_include_slice)

    with open(f"{log_dir}/lagrangian_inds_to_include.json", 'w') as fp:
        json.dump(lagrangian_inds_to_include, fp)

    args.env = f'{experiment_label}_{entry_point}-v1'
    register(
        id=args.env,
        entry_point=entry_point,
        max_episode_steps=1000,
        kwargs={"lagrangian_inds_to_include": lagrangian_inds_to_include}
    )

    def make_env():
        env_out = gym.make(args.env)
        env_out.env.visualize = False
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    walker_env = env.envs[0].env.env
    walker_env.disableViewer = True

    if args.normalize:
        env = VecNormalize(env)

    policy = MlpPolicy

    # extra run info I added for my purposes
    run_info = {"run_num": args.run_num,
                "env_id": args.env,
                "full_param_traj_dir_path": full_param_traj_dir_path}

    layers = [network_size, network_size]
    set_global_seeds(args.seed)
    walker_env.seed(args.seed)

    policy_kwargs = {"net_arch": [dict(vf=layers, pi=layers)]}
    model = PPO2(policy=policy, env=env, n_steps=4096, nminibatches=64, lam=0.95, gamma=0.99,
                 noptepochs=10, ent_coef=0.0, learning_rate=learning_rate, cliprange=0.2,
                 optimizer='adam', policy_kwargs=policy_kwargs, seed=args.seed)
    model.tell_run_info(run_info)

    model.learn(total_timesteps=args.num_timesteps, seed=args.seed)

    model.save(f"{save_dir}/ppo2")

    if args.normalize:
        env.save_running_average(save_dir)
    return log_dir
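# Illustrative call (the hyperparameter values below are made up for the sketch; the real
# driver script passes its own): trains an augmented-input PPO2 policy and returns its log dir.
#
# log_dir = visualize_augment_experiment(
#     augment_num_timesteps=1_500_000, top_num_to_include_slice=slice(0, 10),
#     augment_seed=0, augment_run_num=0, network_size=64,
#     policy_env="DartWalker2d-v1", policy_num_timesteps=3_000_000, policy_run_num=0,
#     policy_seed=0, eval_seed=0, eval_run_num=0, learning_rate=3e-4,
#     additional_note="", result_dir=result_dir)   # result_dir obtained elsewhere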
def visualize_policy_and_collect_COM(augment_num_timesteps, top_num_to_include_slice, augment_seed,
                                     augment_run_num, network_size, policy_env, policy_num_timesteps,
                                     policy_run_num, policy_seed, eval_seed, eval_run_num,
                                     learning_rate, additional_note, metric_param):
    result_dir = get_result_dir(policy_env, policy_num_timesteps, policy_run_num, policy_seed,
                                eval_seed, eval_run_num, additional_note, metric_param)
    args = AttributeDict()
    args.normalize = True
    args.num_timesteps = augment_num_timesteps
    args.run_num = augment_run_num
    args.alg = "ppo2"
    args.seed = augment_seed

    logger.log(f"#######VISUALIZE: {args}")

    # non_linear_global_dict
    linear_global_dict, non_linear_global_dict, lagrangian_values, input_values, layers_values, all_weights = \
        read_all_data(policy_env, policy_num_timesteps, policy_run_num, policy_seed, eval_seed,
                      eval_run_num, additional_note=additional_note)

    timestamp = get_time_stamp('%Y_%m_%d_%H_%M_%S')
    experiment_label = f"learning_rate_{learning_rate}timestamp_{timestamp}_augment_num_timesteps{augment_num_timesteps}" \
                       f"_top_num_to_include{top_num_to_include_slice.start}_{top_num_to_include_slice.stop}" \
                       f"_augment_seed{augment_seed}_augment_run_num{augment_run_num}_network_size{network_size}" \
                       f"_policy_num_timesteps{policy_num_timesteps}_policy_run_num{policy_run_num}_policy_seed{policy_seed}" \
                       f"_eval_seed{eval_seed}_eval_run_num{eval_run_num}_additional_note_{additional_note}"

    entry_point = 'gym.envs.dart:DartWalker2dEnv_aug_input'

    this_run_dir = get_experiment_path_for_this_run(entry_point, args.num_timesteps, args.run_num,
                                                    args.seed, learning_rate=learning_rate,
                                                    top_num_to_include=top_num_to_include_slice,
                                                    result_dir=result_dir, network_size=network_size,
                                                    metric_param=metric_param)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    aug_plot_dir = get_aug_plot_dir(this_run_dir) + "_vis"

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    args.env = f'{experiment_label}_{entry_point}-v1'
    register(
        id=args.env,
        entry_point=entry_point,
        max_episode_steps=1000,
        kwargs={
            'linear_global_dict': linear_global_dict,
            'non_linear_global_dict': non_linear_global_dict,
            'top_to_include_slice': top_num_to_include_slice,
            'aug_plot_dir': aug_plot_dir,
            "lagrangian_values": lagrangian_values,
            "layers_values": layers_values
        }
    )

    def make_env():
        env_out = gym.make(args.env)
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    walker_env = env.envs[0].env.env
    walker_env.disableViewer = False

    if args.normalize:
        env = VecNormalize(env)

    set_global_seeds(args.seed)
    walker_env.seed(args.seed)

    model = PPO2.load(f"{save_dir}/ppo2", seed=augment_seed)
    model.set_pi_from_flat(final_params)

    if args.normalize:
        env.load_running_average(save_dir)

    sk = env.venv.envs[0].env.env.robot_skeleton
    lagrangian_values = {}

    obs = np.zeros((env.num_envs,) + env.observation_space.shape)
    obs[:] = env.reset()

    env = VecVideoRecorder(env, aug_plot_dir, record_video_trigger=lambda x: x == 0,
                           video_length=3000, name_prefix="vis_this_policy")

    lagrangian_values["M"] = [sk.M.reshape((-1, 1))]
    lagrangian_values["COM"] = [sk.C.reshape((-1, 1))]
    lagrangian_values["Coriolis"] = [sk.c.reshape((-1, 1))]
    lagrangian_values["q"] = [sk.q.reshape((-1, 1))]
    lagrangian_values["dq"] = [sk.dq.reshape((-1, 1))]

    contact_values = {}

    neuron_values = model.give_neuron_values(obs)
    raw_layer_values_list = [[neuron_value.reshape((-1, 1))] for neuron_value in neuron_values]

    env.render()
    ep_infos = []
    steps_to_first_done = 0
    first_done = False

    # epi_rew = 0
    for _ in range(3000):
        actions = model.step(obs)[0]

        # yield neuron_values
        obs, rew, done, infos = env.step(actions)
        # epi_rew += rew[0]
        if done and not first_done:
            first_done = True
        if not first_done:
            steps_to_first_done += 1

        neuron_values = model.give_neuron_values(obs)
        for i, layer in enumerate(neuron_values):
            raw_layer_values_list[i].append(layer.reshape((-1, 1)))

        # fill_contacts_jac_dict(infos[0]["contacts"], contact_dict=contact_values, neuron_values=neuron_values)

        lagrangian_values["M"].append(sk.M.reshape((-1, 1)))
        lagrangian_values["q"].append(sk.q.reshape((-1, 1)))
        lagrangian_values["dq"].append(sk.dq.reshape((-1, 1)))
        lagrangian_values["COM"].append(sk.C.reshape((-1, 1)))
        lagrangian_values["Coriolis"].append(sk.c.reshape((-1, 1)))

        # env.render()
        # time.sleep(1)
        for info in infos:
            maybe_ep_info = info.get('episode')
            if maybe_ep_info is not None:
                ep_infos.append(maybe_ep_info)

        env.render()
        done = done.any()
        if done:
            episode_rew = safe_mean([ep_info['r'] for ep_info in ep_infos])
            print(f'episode_rew={episode_rew}')
            # print(f'episode_rew={epi_rew}')
            # epi_rew = 0
            obs = env.reset()

    # Hstack into a big matrix
    lagrangian_values["M"] = np.hstack(lagrangian_values["M"])
    lagrangian_values["COM"] = np.hstack(lagrangian_values["COM"])
    lagrangian_values["Coriolis"] = np.hstack(lagrangian_values["Coriolis"])
    lagrangian_values["q"] = np.hstack(lagrangian_values["q"])
    lagrangian_values["dq"] = np.hstack(lagrangian_values["dq"])

    # for contact_body_name, l in contact_values.items():
    #     body_contact_dict = contact_values[contact_body_name]
    #     for name, l in body_contact_dict.items():
    #         body_contact_dict[name] = np.hstack(body_contact_dict[name])

    input_values = np.hstack(raw_layer_values_list[0])
    layers_values = [np.hstack(layer_list) for layer_list in raw_layer_values_list][1:-2]  # drop variance and inputs

    for i, com in enumerate(lagrangian_values["COM"]):
        plt.figure()
        plt.plot(np.arange(len(com)), com)
        plt.xlabel("time")
        plt.ylabel(f"COM{i}")
        plt.savefig(f"{aug_plot_dir}/COM{i}.jpg")
        plt.close()
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    cma_args, cma_unknown_args = common_arg_parser.parse_known_args()

    # origin = "final_param"
    origin = cma_args.origin

    this_run_dir = get_dir_path_for_this_run(cma_args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)

    cma_run_num, cma_intermediate_data_dir = generate_run_dir(get_cma_and_then_ppo_run_dir,
                                                              intermediate_dir=intermediate_data_dir,
                                                              n_comp=cma_args.n_comp_to_use,
                                                              cma_steps=cma_args.cma_num_timesteps)
    # cma_intermediate_data_dir = get_cma_and_then_ppo_run_dir(intermediate_dir=intermediate_data_dir,
    #                                                          n_comp=cma_args.n_comp_to_use,
    #                                                          cma_steps=cma_args.cma_num_timesteps, run_num=0)
    best_theta_file_name = "best theta from cma"
    # if not os.path.exists(f"{cma_intermediate_data_dir}/{best_theta_file_name}.csv") or \
    #         not os.path.exists(f"{cma_intermediate_data_dir}/opt_mean_path.csv"):

    '''
    ==========================================================================================
    get the pc vectors
    ==========================================================================================
    '''
    proj_or_not = (cma_args.n_comp_to_use == 2)
    result = do_pca(cma_args.n_components, cma_args.n_comp_to_use, traj_params_dir_name,
                    intermediate_data_dir, proj=proj_or_not, origin=origin,
                    use_IPCA=cma_args.use_IPCA, chunk_size=cma_args.chunk_size, reuse=True)
    logger.debug("after pca")

    '''
    ==========================================================================================
    eval all xy coords
    ==========================================================================================
    '''
    from stable_baselines.low_dim_analysis.common import plot_contour_trajectory, gen_subspace_coords, \
        do_eval_returns, do_proj_on_first_n

    if origin == "final_param":
        origin_param = result["final_concat_params"]
    else:
        origin_param = result["mean_param"]

    logger.log("grab start params")
    start_file = get_full_param_traj_file_path(traj_params_dir_name, "start")
    start_params = pd.read_csv(start_file, header=None).values[0]

    starting_coord = do_proj_on_first_n(start_params, result["first_n_pcs"], origin_param)
    # starting_coord = np.random.rand(1, cma_args.n_comp_to_use)
    # starting_coord = (1/2*np.max(xcoordinates_to_eval), 1/2*np.max(ycoordinates_to_eval))  # use mean

    assert result["first_n_pcs"].shape[0] == cma_args.n_comp_to_use
    mean_rets, min_rets, max_rets, opt_path, opt_path_mean, best_theta = do_cma(
        cma_args, result["first_n_pcs"], origin_param, save_dir, starting_coord, cma_args.cma_var)

    np.savetxt(f"{cma_intermediate_data_dir}/opt_mean_path.csv", opt_path_mean, delimiter=',')
    np.savetxt(f"{cma_intermediate_data_dir}/{best_theta_file_name}.csv", best_theta, delimiter=',')

    episode_returns, conti_ppo_full_param_traj_dir_path = do_ppo(args=cma_args, start_theta=best_theta,
                                                                 parent_this_run_dir=cma_intermediate_data_dir,
                                                                 full_space_save_dir=save_dir)

    np.savetxt(f"{cma_intermediate_data_dir}/ppo part returns.csv", episode_returns, delimiter=',')

    plot_dir = get_plot_dir(cma_args)
    cma_and_then_ppo_plot_dir = get_cma_and_then_ppo_plot_dir(plot_dir, cma_args.n_comp_to_use, cma_run_num,
                                                              cma_num_steps=cma_args.cma_num_timesteps,
                                                              ppo_num_steps=cma_args.ppo_num_timesteps,
                                                              origin=origin)
    if not os.path.exists(cma_and_then_ppo_plot_dir):
        os.makedirs(cma_and_then_ppo_plot_dir)

    if cma_args.n_comp_to_use <= 2:
        proj_coords = result["proj_coords"]
        assert proj_coords.shape[1] == 2

        xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(
            cma_args, np.vstack((proj_coords, opt_path_mean)).T)

        eval_returns = do_eval_returns(cma_args, intermediate_data_dir, result["first_n_pcs"], origin_param,
                                       xcoordinates_to_eval, ycoordinates_to_eval, save_dir,
                                       pca_center=origin, reuse=False)

        plot_contour_trajectory(cma_and_then_ppo_plot_dir, f"{origin}_origin_eval_return_contour_plot",
                                xcoordinates_to_eval, ycoordinates_to_eval, eval_returns,
                                proj_coords[:, 0], proj_coords[:, 1],
                                result["explained_variance_ratio"][:2],
                                num_levels=25, show=False, sub_alg_path=opt_path_mean)

    ret_plot_name = f"cma return on {cma_args.n_comp_to_use} dim space of real pca plane, " \
                    f"explained {np.sum(result['explained_variance_ratio'][:cma_args.n_comp_to_use])}"
    plot_cma_returns(cma_and_then_ppo_plot_dir, ret_plot_name, mean_rets, min_rets, max_rets, show=False)

    final_ppo_ep_name = f"final episodes returns after CMA"
    plot_2d(cma_and_then_ppo_plot_dir, final_ppo_ep_name, np.arange(len(episode_returns)),
            episode_returns, "num episode", "episode returns", False)

    # if cma_args.n_comp_to_use == 2:
    #     proj_coords = result["proj_coords"]
    #     assert proj_coords.shape[1] == 2
    #
    #     xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(cma_args, np.vstack((proj_coords, opt_path_mean)).T)
    #
    #     eval_returns = do_eval_returns(cma_args, intermediate_data_dir, result["first_n_pcs"], origin_param,
    #                                    xcoordinates_to_eval, ycoordinates_to_eval, save_dir, pca_center=origin, reuse=False)
    #
    #     plot_contour_trajectory(cma_and_then_ppo_plot_dir, f"{origin}_origin_eval_return_contour_plot", xcoordinates_to_eval,
    #                             ycoordinates_to_eval, eval_returns, proj_coords[:, 0], proj_coords[:, 1],
    #                             result["explained_variance_ratio"][:2],
    #                             num_levels=25, show=False, sub_alg_path=opt_path_mean)

    skip_rows = lambda x: x % 2 == 0
    conti_ppo_params = get_allinone_concat_df(conti_ppo_full_param_traj_dir_path, index=0,
                                              skip_rows=skip_rows).values

    opt_mean_path_in_old_basis = [mean_projected_param.dot(result["first_n_pcs"]) + result["mean_param"]
                                  for mean_projected_param in opt_path_mean]
    distance_to_final = [LA.norm(opt_mean - result["final_concat_params"], ord=2)
                         for opt_mean in np.vstack((opt_mean_path_in_old_basis, conti_ppo_params))]
    distance_to_final_plot_name = f"distance_to_final over generations "
    plot_2d(cma_and_then_ppo_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final)),
            distance_to_final, "num generation", "distance_to_final", False)
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    cma_args, cma_unknown_args = common_arg_parser.parse_known_args()

    # origin = "final_param"
    origin_name = cma_args.origin

    this_run_dir = get_dir_path_for_this_run(cma_args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir, params_scope="pi")
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)

    pca_indexes = cma_args.other_pca_index
    pca_indexes = [int(pca_index) for pca_index in pca_indexes.split(":")]

    cma_run_num, cma_intermediate_data_dir = generate_run_dir(get_cma_and_then_ppo_run_dir,
                                                              intermediate_dir=intermediate_data_dir,
                                                              pca_indexes=pca_indexes,
                                                              cma_steps=cma_args.cma_num_timesteps)
    best_theta_file_name = "best theta from cma"

    start_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_start")
    start_params = pd.read_csv(start_file, header=None).values[0]

    # if not os.path.exists(f"{cma_intermediate_data_dir}/{best_theta_file_name}.csv") or \
    #         not os.path.exists(f"{cma_intermediate_data_dir}/opt_mean_path.csv"):

    '''
    ==========================================================================================
    get the pc vectors
    ==========================================================================================
    '''
    result = do_pca(n_components=cma_args.n_components, traj_params_dir_name=traj_params_dir_name,
                    intermediate_data_dir=intermediate_data_dir, use_IPCA=cma_args.use_IPCA,
                    chunk_size=cma_args.chunk_size, reuse=True)
    logger.debug("after pca")

    '''
    ==========================================================================================
    eval all xy coords
    ==========================================================================================
    '''
    from stable_baselines.low_dim_analysis.common import plot_contour_trajectory, gen_subspace_coords, \
        do_eval_returns, do_proj_on_first_n

    logger.log("grab start params")
    start_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_start")
    start_params = pd.read_csv(start_file, header=None).values[0]

    if origin_name == "final_param":
        origin_param = result["final_concat_params"]
    elif origin_name == "start_param":
        origin_param = start_params
    else:
        origin_param = result["mean_param"]

    pcs = result["pcs_components"]
    pcs_to_use = pcs[pca_indexes]

    starting_coord = do_proj_on_first_n(start_params, pcs_to_use, origin_param)

    plot_dir = get_plot_dir(cma_args)
    cma_and_then_ppo_plot_dir = get_cma_and_then_ppo_plot_dir(plot_dir, pca_indexes, cma_run_num,
                                                              cma_num_steps=cma_args.cma_num_timesteps,
                                                              ppo_num_steps=cma_args.ppo_num_timesteps,
                                                              origin=origin_name)

    # starting_coord = (1/2*np.max(xcoordinates_to_eval), 1/2*np.max(ycoordinates_to_eval))  # use mean
    mean_rets, min_rets, max_rets, opt_path, opt_path_mean, best_pi_theta = do_cma(
        cma_args, pcs_to_use, origin_param, save_dir, starting_coord, cma_args.cma_var)
    # np.savetxt(f"{cma_intermediate_data_dir}/opt_mean_path.csv", opt_path_mean, delimiter=',')
    np.savetxt(f"{cma_intermediate_data_dir}/{best_theta_file_name}.csv", best_pi_theta, delimiter=',')

    ret_plot_name = f"cma return on {pca_indexes} dim space of real pca plane, " \
                    f"explained {np.sum(result['explained_variance_ratio'][pca_indexes])}"
    plot_cma_returns(cma_and_then_ppo_plot_dir, ret_plot_name, mean_rets, min_rets, max_rets, show=False)

    vf_final_file = get_full_param_traj_file_path(traj_params_dir_name, "vf_final")
    vf_final_params = pd.read_csv(vf_final_file, header=None).values[0]

    episode_returns, conti_ppo_full_param_traj_dir_path = do_ppo(args=cma_args, start_pi_theta=best_pi_theta,
                                                                 parent_this_run_dir=cma_intermediate_data_dir,
                                                                 full_space_save_dir=save_dir,
                                                                 vf_final_params=vf_final_params)
    # dump_row_write_csv(cma_intermediate_data_dir, episode_returns, "ppo part returns")
    np.savetxt(f"{cma_intermediate_data_dir}/ppo part returns.csv", episode_returns, delimiter=",")

    if not os.path.exists(cma_and_then_ppo_plot_dir):
        os.makedirs(cma_and_then_ppo_plot_dir)

    conti_ppo_params = get_allinone_concat_df(conti_ppo_full_param_traj_dir_path).values

    if len(pca_indexes) <= 2:
        pcs_to_plot_contour = pca_indexes
        if len(pcs_to_plot_contour) == 1:
            if pcs_to_plot_contour[0] + 1 < cma_args.n_components:
                pcs_to_plot_contour.append(pcs_to_plot_contour[0] + 1)
            else:
                pcs_to_plot_contour.append(pcs_to_plot_contour[0] - 1)

        proj_coords = project(result["pcs_components"], pcs_slice=pcs_to_plot_contour,
                              origin_name=origin_name, origin_param=origin_param,
                              IPCA_chunk_size=cma_args.chunk_size,
                              traj_params_dir_name=traj_params_dir_name,
                              intermediate_data_dir=intermediate_data_dir,
                              n_components=cma_args.n_components, reuse=False)
        assert proj_coords.shape[1] == 2

        if len(pca_indexes) == 1:
            opt_path_mean_2d = np.hstack((opt_path_mean, np.zeros((1, len(opt_path_mean))).T))
        else:
            opt_path_mean_2d = opt_path_mean

        xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(
            cma_args, np.vstack((proj_coords, opt_path_mean_2d)).T)

        projected_after_ppo_params = do_proj_on_first_n(conti_ppo_params, pcs[pcs_to_plot_contour], origin_param)
        full_path = np.vstack((opt_path_mean_2d, projected_after_ppo_params))

        eval_returns = do_eval_returns(cma_args, intermediate_data_dir, pcs[pcs_to_plot_contour], origin_param,
                                       xcoordinates_to_eval, ycoordinates_to_eval, save_dir,
                                       pca_center=origin_name, reuse=False)

        plot_contour_trajectory(cma_and_then_ppo_plot_dir, f"{origin_name}_origin_eval_return_contour_plot",
                                xcoordinates_to_eval, ycoordinates_to_eval, eval_returns,
                                proj_coords[:, 0], proj_coords[:, 1],
                                result["explained_variance_ratio"][pcs_to_plot_contour],
                                num_levels=25, show=False, sub_alg_path=full_path)

    final_ppo_ep_name = f"final episodes returns CMA PPO"
    plot_2d(cma_and_then_ppo_plot_dir, final_ppo_ep_name, np.arange(len(episode_returns)),
            episode_returns, "num episode", "episode returns", False)

    opt_mean_path_in_old_basis = [mean_projected_param.dot(pcs_to_use) + result["mean_param"]
                                  for mean_projected_param in opt_path_mean]
    distance_to_final = [LA.norm(opt_mean - result["final_params"], ord=2)
                         for opt_mean in opt_mean_path_in_old_basis]
    distance_to_final_plot_name = f"distance_to_final over generations of CMA "
    plot_2d(cma_and_then_ppo_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final)),
            distance_to_final, "num generation", "distance_to_final", False)

    distance_to_final_ppo = [LA.norm(opt_mean - result["final_params"], ord=2)
                             for opt_mean in conti_ppo_params]
    distance_to_final_plot_name = f"distance_to_final over generations of PPO"
    plot_2d(cma_and_then_ppo_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final_ppo)),
            distance_to_final_ppo, "num generation", "distance_to_final", False)
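# Minimal sketch of the subspace projection and its inverse as they are used above. The
# semantics of do_proj_on_first_n and the "back to old basis" expression are assumed here;
# `pcs` is a (k, D) matrix of row-orthonormal principal components.
import numpy as np


def proj_onto_pcs_sketch(theta, pcs, origin):
    """Illustrative: full-space vector -> k-dim coordinates relative to `origin`."""
    return (theta - origin).dot(pcs.T)


def back_to_full_space_sketch(coords, pcs, origin):
    """Illustrative inverse: k-dim coordinates -> full-space parameter vector."""
    return coords.dot(pcs) + origin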
def visualize_policy_and_collect_COM(seed, run_num, policy_env, policy_num_timesteps, policy_seed, policy_run_num):
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    args, cma_unknown_args = common_arg_parser.parse_known_args()
    args.env = policy_env
    args.seed = policy_seed
    args.num_timesteps = policy_num_timesteps
    args.run_num = policy_run_num

    this_run_dir = get_dir_path_for_this_run(args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    def make_env():
        env_out = gym.make(args.env)
        env_out.env.disableViewer = False
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        env_out.seed(seed)
        return env_out

    env = DummyVecEnv([make_env])
    if args.normalize:
        env = VecNormalize(env)

    model = PPO2.load(f"{save_dir}/ppo2", seed=seed)
    model.set_pi_from_flat(final_params)
    if args.normalize:
        env.load_running_average(save_dir)

    sk = env.venv.envs[0].env.env.robot_skeleton
    lagrangian_values = {}

    obs = np.zeros((env.num_envs,) + env.observation_space.shape)
    obs[:] = env.reset()

    plot_dir = get_plot_dir(policy_env=args.env, policy_num_timesteps=policy_num_timesteps,
                            policy_run_num=policy_run_num, policy_seed=policy_seed, eval_seed=seed,
                            eval_run_num=run_num, additional_note="")
    if os.path.exists(plot_dir):
        shutil.rmtree(plot_dir)
    os.makedirs(plot_dir)

    env = VecVideoRecorder(env, plot_dir, record_video_trigger=lambda x: x == 0, video_length=3000,
                           name_prefix="3000000agent-{}".format(args.env))

    lagrangian_values["M"] = [sk.M.reshape((-1, 1))]
    lagrangian_values["COM"] = [sk.C.reshape((-1, 1))]
    lagrangian_values["Coriolis"] = [sk.c.reshape((-1, 1))]
    lagrangian_values["q"] = [sk.q.reshape((-1, 1))]
    lagrangian_values["dq"] = [sk.dq.reshape((-1, 1))]

    contact_values = {}

    neuron_values = model.give_neuron_values(obs)
    raw_layer_values_list = [[neuron_value.reshape((-1, 1))] for neuron_value in neuron_values]

    env.render()
    ep_infos = []
    steps_to_first_done = 0
    first_done = False

    # epi_rew = 0
    for _ in range(3000):
        actions = model.step(obs)[0]

        # yield neuron_values
        obs, rew, done, infos = env.step(actions)
        # epi_rew += rew[0]
        if done and not first_done:
            first_done = True
        if not first_done:
            steps_to_first_done += 1

        neuron_values = model.give_neuron_values(obs)
        for i, layer in enumerate(neuron_values):
            raw_layer_values_list[i].append(layer.reshape((-1, 1)))

        # fill_contacts_jac_dict(infos[0]["contacts"], contact_dict=contact_values, neuron_values=neuron_values)

        lagrangian_values["M"].append(sk.M.reshape((-1, 1)))
        lagrangian_values["q"].append(sk.q.reshape((-1, 1)))
        lagrangian_values["dq"].append(sk.dq.reshape((-1, 1)))
        lagrangian_values["COM"].append(sk.C.reshape((-1, 1)))
        lagrangian_values["Coriolis"].append(sk.c.reshape((-1, 1)))

        # env.render()
        # time.sleep(1)
        for info in infos:
            maybe_ep_info = info.get('episode')
            if maybe_ep_info is not None:
                ep_infos.append(maybe_ep_info)

        env.render()
        done = done.any()
        if done:
            episode_rew = safe_mean([ep_info['r'] for ep_info in ep_infos])
            print(f'episode_rew={episode_rew}')
            # print(f'episode_rew={epi_rew}')
            # epi_rew = 0
            obs = env.reset()

    # Hstack into a big matrix
    lagrangian_values["M"] = np.hstack(lagrangian_values["M"])
    lagrangian_values["COM"] = np.hstack(lagrangian_values["COM"])
    lagrangian_values["Coriolis"] = np.hstack(lagrangian_values["Coriolis"])
    lagrangian_values["q"] = np.hstack(lagrangian_values["q"])
    lagrangian_values["dq"] = np.hstack(lagrangian_values["dq"])

    # for contact_body_name, l in contact_values.items():
    #     body_contact_dict = contact_values[contact_body_name]
    #     for name, l in body_contact_dict.items():
    #         body_contact_dict[name] = np.hstack(body_contact_dict[name])

    input_values = np.hstack(raw_layer_values_list[0])
    layers_values = [np.hstack(layer_list) for layer_list in raw_layer_values_list][1:-2]  # drop variance and inputs

    for i, com in enumerate(lagrangian_values["COM"]):
        plt.figure()
        plt.plot(np.arange(len(com)), com)
        plt.xlabel("time")
        plt.ylabel(f"COM{i}")
        plt.savefig(f"{plot_dir}/COM{i}.jpg")
        plt.close()
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    args, cma_unknown_args = common_arg_parser.parse_known_args()

    this_run_dir = get_dir_path_for_this_run(args)
    plot_dir_alg = get_plot_dir(args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir, params_scope="pi")
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)
    if not os.path.exists(plot_dir_alg):
        os.makedirs(plot_dir_alg)

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    fig = plt.figure(figsize=(12, 12))
    ax = fig.gca()
    ax.axis('off')

    preload_neuron_values_list = preload_neurons(args, save_dir, final_params, args.eval_num_timesteps)
    obz_norm, latent_norm, dist_norm = give_normalizers(preload_neuron_values_list)

    latent_cmap = plt.get_cmap("Oranges")
    obz_cmap = plt.get_cmap("Blues")
    dist_cmap = plt.get_cmap("Greys")

    neuron_values_gen = neuron_values_generator(args, save_dir, final_params, args.eval_num_timesteps)

    left, right, bottom, top = 0.1, 0.9, 0.1, 0.9
    result_artists = []

    def init():
        try:
            first_neurons = neuron_values_gen.__next__()
        except StopIteration:
            return

        layer_sizes = [layer.shape[1] for layer in first_neurons]
        first_neurons = np.array([neuron_value.reshape(-1) for neuron_value in first_neurons])

        v_spacing = (top - bottom) / float(max(layer_sizes))
        h_spacing = (right - left) / float(len(layer_sizes) - 1)

        # Nodes
        for n, neuron_layer_value in enumerate(first_neurons):
            neuron_layer_value = neuron_layer_value.reshape(-1)
            layer_size = len(neuron_layer_value)
            layer_top = v_spacing * (layer_size - 1) / 2. + (top + bottom) / 2.
            for m, neuron_value in enumerate(neuron_layer_value):
                if n == 0:  # obz
                    circle = plt.Circle((n * h_spacing + left, layer_top - m * v_spacing), v_spacing / 4.,
                                        color=obz_cmap(obz_norm(neuron_value)), ec='k', zorder=4)
                elif n >= len(first_neurons) - 2:  # dist
                    circle = plt.Circle((n * h_spacing + left, layer_top - m * v_spacing), v_spacing / 4.,
                                        color=dist_cmap(dist_norm(neuron_value)), ec='k', zorder=4)
                else:  # latent
                    circle = plt.Circle((n * h_spacing + left, layer_top - m * v_spacing), v_spacing / 4.,
                                        color=latent_cmap(latent_norm(neuron_value)), ec='k', zorder=4)
                ax.add_artist(circle)
                result_artists.append(circle)
        return result_artists

    def update_neuron(neuron_values):
        num_of_obz_neurons = neuron_values[0].reshape(-1).shape[0]
        num_of_dist_neurons = np.concatenate(neuron_values[-2:], axis=1).shape[1]
        neuron_values = np.concatenate(neuron_values, axis=1).reshape(-1).ravel()
        for i, neuron_value in enumerate(neuron_values):
            if i < num_of_obz_neurons:  # obz
                result_artists[i].set_color(obz_cmap(obz_norm(neuron_value)))
            elif i >= neuron_values.shape[0] - num_of_dist_neurons:  # dist
                result_artists[i].set_color(dist_cmap(dist_norm(neuron_value)))
            else:  # latent
                result_artists[i].set_color(latent_cmap(latent_norm(neuron_value)))
        # plt.draw()
        return result_artists

    rot_animation = FuncAnimation(fig, update_neuron, frames=neuron_values_gen, init_func=init, interval=3)
    plt.show()

    file_path = f"{plot_dir_alg}/neuron_firing.gif"
    print(f"~~~~~~~~~~~~~~~~~~~~~~saving to {file_path}")
    if os.path.isfile(file_path):
        os.remove(file_path)
    rot_animation.save(file_path, dpi=80, writer='imagemagick')
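# Saving the GIF above requires an ImageMagick install; a hedged fallback is matplotlib's
# built-in Pillow writer (available in recent matplotlib releases):
#
# from matplotlib.animation import PillowWriter
# rot_animation.save(file_path, dpi=80, writer=PillowWriter(fps=30))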
def main():
    import sys
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    args, cma_unknown_args = common_arg_parser.parse_known_args()

    this_run_dir = get_dir_path_for_this_run(args)
    plot_dir_alg = get_plot_dir(args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(plot_dir_alg):
        os.makedirs(plot_dir_alg)

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    def make_env():
        env_out = gym.make(args.env)
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    # env_out = gym.make(args.env)
    # env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)

    if args.normalize:
        env = VecNormalize(env)

    # policy = MlpPolicy
    model = PPO2.load(f"{save_dir}/ppo2")  # this also loads V function
    # model = PPO2(policy=policy, env=env, n_steps=args.n_steps, nminibatches=args.nminibatches, lam=0.95, gamma=0.99,
    #              noptepochs=10, ent_coef=0.0, learning_rate=3e-4, cliprange=0.2, optimizer=args.optimizer)
    model.set_pi_from_flat(final_params)

    if args.normalize:
        env.load_running_average(save_dir)

    sk = env.venv.envs[0].env.env.robot_skeleton
    lagrangian_values = {}

    obs = np.zeros((env.num_envs,) + env.observation_space.shape)
    obs[:] = env.reset()

    # env = VecVideoRecorder(env, "./",
    #                        record_video_trigger=lambda x: x == 0, video_length=3000,
    #                        name_prefix="3000000agent-{}".format(args.env))

    lagrangian_values["M"] = [sk.M.reshape((-1, 1))]
    lagrangian_values["COM"] = [sk.C.reshape((-1, 1))]
    lagrangian_values["Coriolis"] = [sk.c.reshape((-1, 1))]
    lagrangian_values["q"] = [sk.q.reshape((-1, 1))]
    lagrangian_values["dq"] = [sk.dq.reshape((-1, 1))]

    contact_values = {}

    neuron_values = model.give_neuron_values(obs)
    layer_values_list = [[neuron_value.reshape((-1, 1))] for neuron_value in neuron_values]

    env.render()
    ep_infos = []
    steps_to_first_done = 0
    first_done = False

    for _ in range(3000):
        actions = model.step(obs)[0]

        # yield neuron_values
        obs, rew, done, infos = env.step(actions)
        if done and not first_done:
            first_done = True
        if not first_done:
            steps_to_first_done += 1

        neuron_values = model.give_neuron_values(obs)
        for i, layer in enumerate(neuron_values):
            layer_values_list[i].append(layer.reshape((-1, 1)))

        fill_contacts_jac_dict(infos[0]["contacts"], contact_dict=contact_values, neuron_values=neuron_values)

        lagrangian_values["M"].append(sk.M.reshape((-1, 1)))
        lagrangian_values["q"].append(sk.q.reshape((-1, 1)))
        lagrangian_values["dq"].append(sk.dq.reshape((-1, 1)))
        lagrangian_values["COM"].append(sk.C.reshape((-1, 1)))
        lagrangian_values["Coriolis"].append(sk.c.reshape((-1, 1)))

        env.render()
        # time.sleep(1)
        for info in infos:
            maybe_ep_info = info.get('episode')
            if maybe_ep_info is not None:
                ep_infos.append(maybe_ep_info)

        # env.render()
        done = done.any()
        if done:
            episode_rew = safe_mean([ep_info['r'] for ep_info in ep_infos])
            print(f'episode_rew={episode_rew}')
            obs = env.reset()

    # Hstack into a big matrix
    lagrangian_values["M"] = np.hstack(lagrangian_values["M"])
    lagrangian_values["COM"] = np.hstack(lagrangian_values["COM"])
    lagrangian_values["Coriolis"] = np.hstack(lagrangian_values["Coriolis"])
    lagrangian_values["q"] = np.hstack(lagrangian_values["q"])
    lagrangian_values["dq"] = np.hstack(lagrangian_values["dq"])

    for contact_body_name, l in contact_values.items():
        body_contact_dict = contact_values[contact_body_name]
        for name, l in body_contact_dict.items():
            body_contact_dict[name] = np.hstack(body_contact_dict[name])

    layer_values_list = [np.hstack(layer_list) for layer_list in layer_values_list][1:-2]  # drop variance
    # plt.scatter(lagrangian_values["M"][15], layer_values_list[1][2])
    # plt.scatter(lagrangian_values["M"][11], layer_values_list[0][63])

    out_dir = f"/home/panda-linux/PycharmProjects/low_dim_update_dart/low_dim_update_stable/neuron_vis/plots_{args.env}_{args.num_timesteps}"
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    all_weights = model.get_all_weight_values()
    for ind, weights in enumerate(all_weights):
        fname = f"{out_dir}/weights_layer_{ind}.txt"
        np.savetxt(fname, weights)

    PLOT_CUTOFF = steps_to_first_done
    plot_everything(lagrangian_values, layer_values_list, out_dir, PLOT_CUTOFF)
    scatter_the_linear_significant_ones(lagrangian_values, layer_values_list, threshold=0.6, out_dir=out_dir)
    scatter_the_nonlinear_significant_but_not_linear_ones(lagrangian_values, layer_values_list,
                                                          linear_threshold=0.3, nonlinear_threshold=0.6,
                                                          out_dir=out_dir)

    # contact_dicts = {}
    # for contact_body_name, l in contact_values.items():
    #     body_contact_dict = contact_values[contact_body_name]
    #     contact_dicts[contact_body_name] = {}
    #     build_dict = contact_dicts[contact_body_name]
    #     build_dict["body"] = {}
    #     build_dict["layer"] = {}
    #     for name, l in body_contact_dict.items():
    #         for i in range(len(l)):
    #             if name == contact_body_name:
    #                 build_dict["body"][f"{contact_body_name}_{i}"] = l[i]
    #             else:
    #                 build_dict["layer"][f"layer_{name}_neuron_{i}"] = l[i]
    #     body_contact_df = pd.DataFrame.from_dict(build_dict["body"], "index")
    #     layer_contact_df = pd.DataFrame.from_dict(build_dict["layer"], "index")
    #     body_contact_df.to_csv(f"{data_dir}/{contact_body_name}_contact.txt", sep='\t')
    #     layer_contact_df.to_csv(f"{data_dir}/{contact_body_name}_layers.txt", sep='\t')

    # TO CSV format
    # data_dir = f"/home/panda-linux/PycharmProjects/low_dim_update_dart/mictools/examples/neuron_vis_data{args.env}_time_steps_{args.num_timesteps}"
    # if os.path.exists(data_dir):
    #     shutil.rmtree(data_dir)
    # os.makedirs(data_dir)
    #
    # for contact_body_name, d in contact_dicts.items():
    #     build_dict = d
    #     body_contact_df = pd.DataFrame.from_dict(build_dict["body"], "index")
    #     layer_contact_df = pd.DataFrame.from_dict(build_dict["layer"], "index")
    #     body_contact_df.to_csv(f"{data_dir}/{contact_body_name}_contact.txt", sep='\t')
    #     layer_contact_df.to_csv(f"{data_dir}/{contact_body_name}_layers.txt", sep='\t')
    #
    # neurons_dict = {}
    # for layer_index in range(len(layer_values_list)):
    #     for neuron_index in range(len(layer_values_list[layer_index])):
    #         neurons_dict[f"layer_{layer_index}_neuron_{neuron_index}"] = layer_values_list[layer_index][neuron_index]
    #
    # for i in range(len(lagrangian_values["COM"])):
    #     neurons_dict[f"COM_index_{i}"] = lagrangian_values["COM"][i]
    #
    # neuron_df = pd.DataFrame.from_dict(neurons_dict, "index")
    #
    # lagrangian_dict = {}
    # for k, v in lagrangian_values.items():
    #     for i in range(len(v)):
    #         lagrangian_dict[f"{k}_index_{i}"] = v[i]
    #
    # lagrangian_df = pd.DataFrame.from_dict(lagrangian_dict, "index")
    #
    # neuron_df.to_csv(f"{data_dir}/neurons.txt", sep='\t')
    # lagrangian_df.to_csv(f"{data_dir}/lagrangian.txt", sep='\t')

    # cor = {}
    # best_cor = {}
    # cor["M"] = get_correlations(lagrangian_values["M"], layer_values_list)
    # best_cor["M"] = [np.max(np.abs(cor_m)) for cor_m in cor["M"]]
    #
    # cor["COM"] = get_correlations(lagrangian_values["COM"], layer_values_list)
    # best_cor["COM"] = [np.max(np.abs(cor_m)) for cor_m in cor["COM"]]
    #
    # cor["Coriolis"] = get_correlations(lagrangian_values["Coriolis"], layer_values_list)
    # best_cor["Coriolis"] = [np.max(np.abs(cor_m)) for cor_m in cor["Coriolis"]]
    # best_cor["Coriolis_argmax"] = [np.argmax(np.abs(cor_m)) for cor_m in cor["Coriolis"]]
    #
    # ncor = {}
    # nbest_cor = {}
    # ncor["M"] = get_normalized_correlations(lagrangian_values["M"], layer_values_list)
    # nbest_cor["M"] = [np.max(np.abs(cor_m)) for cor_m in ncor["M"]]
    #
    # ncor["COM"] = get_normalized_correlations(lagrangian_values["COM"], layer_values_list)
    # nbest_cor["COM"] = [np.max(np.abs(cor_m)) for cor_m in ncor["COM"]]
    #
    # ncor["Coriolis"] = get_normalized_correlations(lagrangian_values["Coriolis"], layer_values_list)
    # nbest_cor["Coriolis"] = [np.max(np.abs(cor_m)) for cor_m in ncor["Coriolis"]]
    # nbest_cor["Coriolis_argmax"] = [np.argmax(np.abs(cor_m)) for cor_m in ncor["Coriolis"]]
    #
    # lin_reg = {"perm_1": {}, "perm_2": {}}
    # best_lin_reg = {"perm_1": {}, "perm_2": {}}
    # lin_reg["perm_1"]["M"], best_lin_reg["perm_1"]["M"] = get_results("M", lagrangian_values, layer_values_list, perm_num=1)
    # lin_reg["perm_2"]["M"], best_lin_reg["perm_2"]["M"] = get_results("M", lagrangian_values, layer_values_list, perm_num=2)
    # lin_reg["perm_1"]["COM"], best_lin_reg["perm_1"]["COM"] = get_results("COM", lagrangian_values, layer_values_list, perm_num=1)
    # lin_reg["perm_2"]["COM"], best_lin_reg["perm_2"]["COM"] = get_results("COM", lagrangian_values, layer_values_list, perm_num=2)
    #
    # lin_reg_1["M"] = get_linear_regressions_1_perm(lagrangian_values["M"], layer_values_list)
    # lin_reg_2["M"] = get_linear_regressions_2_perm(lagrangian_values["M"], layer_values_list)
    # best_lin_reg_2["M"] = []
    # for lin_l in lin_reg_2["M"]:
    #     if lin_l == []:
    #         best_lin_reg_2["M"].append([])
    #     else:
    #         best_lin_reg_2["M"].append(lin_l[np.argmin(lin_l[:, 0])])
    #
    # best_lin_reg_1["M"] = []
    # for lin_l in lin_reg_1["M"]:
    #     if lin_l == []:
    #         best_lin_reg_1["M"].append([])
    #     else:
    #         best_lin_reg_1["M"].append(lin_l[np.argmin(lin_l[:, 0])])
    # best_lin_reg_1["M"] = np.array(best_lin_reg_1["M"])
    # best_lin_reg_2["M"] = np.array(best_lin_reg_2["M"])
    #
    # lin_reg_1["M"].dump("lin_reg_1_M.txt")
    # lin_reg_2["M"].dump("lin_reg_2_M.txt")
    # best_lin_reg_1["M"].dump("best_lin_reg_1_M.txt")
    # best_lin_reg_2["M"].dump("best_lin_reg_2_M.txt")
    #
    # lin_reg_1["COM"] = get_linear_regressions_1_perm(lagrangian_values["COM"], layer_values_list)
    # lin_reg_2["COM"] = get_linear_regressions_2_perm(lagrangian_values["COM"], layer_values_list)
    # best_lin_reg_2["COM"] = []
    # for lin_l in lin_reg_2["COM"]:
    #     if lin_l == []:
    #         best_lin_reg_2["COM"].append([])
    #     else:
    #         best_lin_reg_2["COM"].append(lin_l[np.argmin(lin_l[:, 0])])
    #
    # best_lin_reg_1["COM"] = []
    # for lin_l in lin_reg_1["COM"]:
    #     if lin_l == []:
    #         best_lin_reg_1["COM"].append([])
    #     else:
    #         best_lin_reg_1["COM"].append(lin_l[np.argmin(lin_l[:, 0])])
    #
    # best_lin_reg_1["COM"] = np.array(best_lin_reg_1["M"])
    # best_lin_reg_2["COM"] = np.array(best_lin_reg_2["M"])
    # lin_reg_1["COM"].dump("lin_reg_1_COM.txt")
    # lin_reg_2["COM"].dump("lin_reg_2_COM.txt")
    # best_lin_reg_1["COM"].dump("best_lin_reg_1_COM.txt")
    # best_lin_reg_2["COM"].dump("best_lin_reg_2_COM.txt")

    pass
def do_ppos(ppos_args, result, intermediate_data_dir, origin_param):
    ppos_args.alg = "ppo_subspace"
    logger.log(f"#######TRAIN: {ppos_args}")

    this_run_dir = get_dir_path_for_this_run(ppos_args)
    if os.path.exists(this_run_dir):
        import shutil
        shutil.rmtree(this_run_dir)
    os.makedirs(this_run_dir)

    log_dir = get_log_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)
    full_param_traj_dir_path = get_full_params_dir(this_run_dir)

    if os.path.exists(full_param_traj_dir_path):
        import shutil
        shutil.rmtree(full_param_traj_dir_path)
    os.makedirs(full_param_traj_dir_path)

    if os.path.exists(save_dir):
        import shutil
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)

    run_info = {"full_param_traj_dir_path": full_param_traj_dir_path}

    logger.configure(log_dir)

    tic = time.time()

    def make_env():
        env_out = gym.make(ppos_args.env)
        env_out.env.disableViewer = True
        env_out.env.visualize = False
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        return env_out

    env = DummyVecEnv([make_env])
    if ppos_args.normalize:
        env = VecNormalize(env)

    set_global_seeds(ppos_args.seed)
    policy = MlpMultPolicy

    model = PPO2(policy=policy, env=env, n_steps=ppos_args.n_steps, nminibatches=ppos_args.nminibatches,
                 lam=0.95, gamma=0.99, noptepochs=10, ent_coef=0.0, learning_rate=3e-4, cliprange=0.2,
                 policy_kwargs={"num_comp": len(result["first_n_pcs"])},
                 pcs=result["first_n_pcs"], origin_theta=origin_param)
    model.tell_run_info(run_info)

    eprews, optimization_path = model.learn(total_timesteps=ppos_args.ppos_num_timesteps,
                                            give_optimization_path=True)

    toc = time.time()
    logger.log(f"####################################PPOS took {toc-tic} seconds")

    moving_ave_rewards = get_moving_aves(eprews, 100)
    return eprews, moving_ave_rewards, optimization_path
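# Simple moving-average helper of the kind get_moving_aves presumably implements (the window
# semantics are an assumption; shown only to make the reward-smoothing step concrete).
import numpy as np


def moving_average_sketch(xs, window=100):
    """Illustrative: trailing moving average over a 1-D reward sequence."""
    xs = np.asarray(xs, dtype=float)
    if xs.size < window:
        return xs.copy()
    return np.convolve(xs, np.ones(window) / window, mode="valid")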
def main():
    import sys
    logger.log(sys.argv)
    ppos_arg_parser = get_common_parser()
    ppos_args, ppos_unknown_args = ppos_arg_parser.parse_known_args()

    full_space_alg = ppos_args.alg

    # origin = "final_param"
    origin = ppos_args.origin

    this_run_dir = get_dir_path_for_this_run(ppos_args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    intermediate_data_dir = get_intermediate_data_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    if not os.path.exists(intermediate_data_dir):
        os.makedirs(intermediate_data_dir)

    ppos_run_num, ppos_intermediate_data_dir = generate_run_dir(get_ppos_returns_dirname,
                                                                intermediate_dir=intermediate_data_dir,
                                                                n_comp=ppos_args.n_comp_to_use)
    '''
    ==========================================================================================
    get the pc vectors
    ==========================================================================================
    '''
    proj_or_not = (ppos_args.n_comp_to_use == 2)
    result = do_pca(ppos_args.n_components, ppos_args.n_comp_to_use, traj_params_dir_name,
                    intermediate_data_dir, proj=proj_or_not, origin=origin,
                    use_IPCA=ppos_args.use_IPCA, chunk_size=ppos_args.chunk_size)

    '''
    ==========================================================================================
    eval all xy coords
    ==========================================================================================
    '''
    if origin == "final_param":
        origin_param = result["final_concat_params"]
    else:
        origin_param = result["mean_param"]

    final_param = result["final_concat_params"]
    last_proj_coord = do_proj_on_first_n(final_param, result["first_n_pcs"], origin_param)

    if origin == "final_param":
        back_final_param = low_dim_to_old_basis(last_proj_coord, result["first_n_pcs"], origin_param)
        # sanity check: projecting the final params and mapping them back should be (nearly) lossless
        np.testing.assert_almost_equal(back_final_param, final_param)

    starting_coord = last_proj_coord
    logger.log(f"PPOS STARTING COORD: {starting_coord}")

    # starting_coord = (1/2*np.max(xcoordinates_to_eval), 1/2*np.max(ycoordinates_to_eval))  # use mean
    assert result["first_n_pcs"].shape[0] == ppos_args.n_comp_to_use

    eprews, moving_ave_rewards, optimization_path = do_ppos(ppos_args, result, intermediate_data_dir, origin_param)

    ppos_args.alg = full_space_alg
    plot_dir = get_plot_dir(ppos_args)
    ppos_plot_dir = get_ppos_plot_dir(plot_dir, ppos_args.n_comp_to_use, ppos_run_num)
    if not os.path.exists(ppos_plot_dir):
        os.makedirs(ppos_plot_dir)

    ret_plot_name = f"cma return on {ppos_args.n_comp_to_use} dim space of real pca plane, " \
                    f"explained {np.sum(result['explained_variance_ratio'][:ppos_args.n_comp_to_use])}"
    plot_ppos_returns(ppos_plot_dir, ret_plot_name, moving_ave_rewards, show=False)

    if ppos_args.n_comp_to_use == 2:
        proj_coords = result["proj_coords"]
        assert proj_coords.shape[1] == 2

        xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(
            ppos_args, np.vstack((proj_coords, optimization_path)).T)

        eval_returns = do_eval_returns(ppos_args, intermediate_data_dir, result["first_n_pcs"], origin_param,
                                       xcoordinates_to_eval, ycoordinates_to_eval, save_dir, pca_center=origin)

        plot_contour_trajectory(ppos_plot_dir, "end_point_origin_eval_return_contour_plot",
                                xcoordinates_to_eval, ycoordinates_to_eval, eval_returns,
                                proj_coords[:, 0], proj_coords[:, 1],
                                result["explained_variance_ratio"][:2],
                                num_levels=25, show=False, sub_alg_path=optimization_path)

    opt_mean_path_in_old_basis = [low_dim_to_old_basis(projected_opt_params, result["first_n_pcs"], origin_param)
                                  for projected_opt_params in optimization_path]
    distance_to_final = [LA.norm(opt_mean - final_param, ord=2) for opt_mean in opt_mean_path_in_old_basis]
    distance_to_final_plot_name = f"distance_to_final over generations "
    plot_2d(ppos_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final)),
            distance_to_final, "num generation", "distance_to_final", False)
def eval_trained_policy_and_collect_data(eval_seed, eval_run_num, policy_env, policy_num_timesteps,
                                         policy_seed, policy_run_num, additional_note):
    logger.log(sys.argv)
    common_arg_parser = get_common_parser()
    args, cma_unknown_args = common_arg_parser.parse_known_args()
    args.env = policy_env
    args.seed = policy_seed
    args.num_timesteps = policy_num_timesteps
    args.run_num = policy_run_num

    this_run_dir = get_dir_path_for_this_run(args)
    traj_params_dir_name = get_full_params_dir(this_run_dir)
    save_dir = get_save_dir(this_run_dir)

    final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
    final_params = pd.read_csv(final_file, header=None).values[0]

    def make_env():
        env_out = gym.make(args.env)
        env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
        env_out.seed(eval_seed)
        return env_out

    env = DummyVecEnv([make_env])
    running_env = env.envs[0].env.env

    set_global_seeds(eval_seed)
    running_env.seed(eval_seed)

    if args.normalize:
        env = VecNormalize(env)

    model = PPO2.load(f"{save_dir}/ppo2", seed=eval_seed)
    model.set_pi_from_flat(final_params)

    if args.normalize:
        env.load_running_average(save_dir)  # is it necessary?
        running_env = env.venv.envs[0].env.env

    lagrangian_values = {}

    obs = np.zeros((env.num_envs,) + env.observation_space.shape)
    obs[:] = env.reset()

    # env = VecVideoRecorder(env, "./",
    #                        record_video_trigger=lambda x: x == 0, video_length=3000,
    #                        name_prefix="3000000agent-{}".format(args.env))

    # init lagrangian values
    for lagrangian_key in lagrangian_keys:
        flat_array = running_env.get_lagrangian_flat_array(lagrangian_key)
        lagrangian_values[lagrangian_key] = [flat_array]

    neuron_values = model.give_neuron_values(obs)
    raw_layer_values_list = [[neuron_value.reshape((-1, 1))] for neuron_value in neuron_values]

    # env.render()
    ep_infos = []
    steps_to_first_done = 0
    first_done = False

    for _ in range(30000):
        actions = model.step(obs)[0]

        # yield neuron_values
        obs, rew, done, infos = env.step(actions)
        if done and not first_done:
            first_done = True
        if not first_done:
            steps_to_first_done += 1

        neuron_values = model.give_neuron_values(obs)
        for i, layer in enumerate(neuron_values):
            raw_layer_values_list[i].append(layer.reshape((-1, 1)))

        # fill_contacts_jac_dict(infos[0]["contacts"], contact_dict=contact_values, neuron_values=neuron_values)

        # filling lagrangian values
        for lagrangian_key in lagrangian_keys:
            flat_array = running_env.get_lagrangian_flat_array(lagrangian_key)
            lagrangian_values[lagrangian_key].append(flat_array)

        # env.render()
        # time.sleep(1)
        for info in infos:
            maybe_ep_info = info.get('episode')
            if maybe_ep_info is not None:
                ep_infos.append(maybe_ep_info)

        # env.render()
        done = done.any()
        if done:
            episode_rew = safe_mean([ep_info['r'] for ep_info in ep_infos])
            print(f'episode_rew={episode_rew}')
            obs = env.reset()

    # Hstack into a big matrix
    for lagrangian_key in lagrangian_keys:
        lagrangian_values[lagrangian_key] = np.hstack(lagrangian_values[lagrangian_key])

    # for contact_body_name, l in contact_values.items():
    #     body_contact_dict = contact_values[contact_body_name]
    #     for name, l in body_contact_dict.items():
    #         body_contact_dict[name] = np.hstack(body_contact_dict[name])

    input_values = np.hstack(raw_layer_values_list[0])
    layers_values = [np.hstack(layer_list) for layer_list in raw_layer_values_list][1:-2]  # drop variance and inputs

    data_dir = get_data_dir(policy_env=args.env, policy_num_timesteps=policy_num_timesteps,
                            policy_run_num=policy_run_num, policy_seed=policy_seed, eval_seed=eval_seed,
                            eval_run_num=eval_run_num, additional_note=additional_note)
    if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
    os.makedirs(data_dir)

    lagrangian_values_fn = f"{data_dir}/lagrangian.pickle"
    with open(lagrangian_values_fn, 'wb') as handle:
        pickle.dump(lagrangian_values, handle, protocol=pickle.HIGHEST_PROTOCOL)

    input_values_fn = f"{data_dir}/input_values.npy"
    layers_values_fn = f"{data_dir}/layer_values.npy"
    np.save(input_values_fn, input_values)
    np.save(layers_values_fn, layers_values)

    all_weights = model.get_all_weight_values()
    for ind, weights in enumerate(all_weights):
        fname = f"{data_dir}/weights_layer_{ind}.txt"
        np.savetxt(fname, weights)
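# Illustrative read-back of the artifacts written above (paths and keys mirror this function's
# outputs; allow_pickle is needed if layer_values.npy ends up as an object array):
#
# with open(f"{data_dir}/lagrangian.pickle", "rb") as handle:
#     lagrangian_values = pickle.load(handle)
# input_values = np.load(f"{data_dir}/input_values.npy")
# layers_values = np.load(f"{data_dir}/layer_values.npy", allow_pickle=True)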