from stable_baselines import A2C
from stable_baselines.common.policies import MlpLnLstmPolicy

from FireflyEnv import ffenv_new_cord
from Config import Config

arg = Config()
arg.goal_radius_range = [0.3, 0.5]

env_new_cord = ffenv_new_cord.FireflyAgentCenter(arg)
env_new_cord.max_distance = 0.5

# n_steps value was missing in the original; 5 is the stable-baselines A2C default.
model = A2C(MlpLnLstmPolicy, env_new_cord, n_steps=5)
model.learn(100000)
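# Usage sketch (assumption, not in the original script): persist the trained
# policy so it can be reloaded later the same way other scripts here call
# DDPG.load / TD3.load. The save path is hypothetical.
model.save('trained_agent/A2C_new_cord_100000')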
def run_inverse(data=None, theta=None, filename=None):
    import os
    import warnings
    warnings.filterwarnings('ignore')
    from copy import copy
    import time
    import random
    # seed = time.time().as_integer_ratio()[0]  # time-based seed, overridden below
    seed = 0  # fixed seed for reproducibility
    random.seed(seed)
    import torch
    torch.manual_seed(seed)
    import numpy as np
    np.random.seed(int(seed))
    from numpy import pi
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # ----------- inverse functions -------------
    # the loop below calls single_theta_inverse, so import that name
    from InverseFuncs import trajectory, getLoss, reset_theta, theta_range, reset_theta_log, single_theta_inverse

    # --------- loading env and agent ----------
    from stable_baselines import TD3
    from FireflyEnv import ffenv_new_cord
    from Config import Config
    arg = Config()

    DISCOUNT_FACTOR = 0.99
    arg.NUM_SAMPLES = 2
    arg.NUM_EP = 1000
    arg.NUM_IT = 2  # number of iterations for gradient descent
    arg.NUM_thetas = 1
    arg.ADAM_LR = 0.007
    arg.LR_STEP = 2
    arg.LR_STOP = 50
    arg.lr_gamma = 0.95
    arg.PI_STD = 1
    arg.goal_radius_range = [0.05, 0.2]

    # agent: convert the stable-baselines model to a torch policy
    import policy_torch
    baselines_mlp_model = TD3.load('trained_agent//TD_95gamma_mc_smallgoal_500000_9_24_1_6.zip')
    agent = policy_torch.copy_mlp_weights(baselines_mlp_model, layers=[128, 128])

    # loading environment, same as training
    env = ffenv_new_cord.FireflyAgentCenter(arg)
    env.agent_knows_phi = False

    true_theta_log = []
    true_loss_log = []
    true_loss_act_log = []
    true_loss_obs_log = []
    final_theta_log = []
    stderr_log = []
    result_log = []
    number_update = 100
    if data is None:
        save_dict = {'theta_estimations': []}
    else:
        save_dict = data

    # use several thetas to inverse
    # note: the loop below reads data['true_theta'], data['theta_estimations'][0]
    # and data['phi'], so data must be provided with those keys
    for num_thetas in range(arg.NUM_thetas):
        # make sure phi and true theta stay the same
        true_theta = torch.Tensor(data['true_theta'])
        env.presist_phi = True
        # here we first test the teacher case, where true_theta == phi
        env.reset(phi=true_theta, theta=true_theta)
        theta = torch.Tensor(data['theta_estimations'][0])
        phi = torch.Tensor(data['phi'])

        save_dict['true_theta'] = true_theta.data.clone().tolist()
        save_dict['phi'] = true_theta.data.clone().tolist()  # phi == true_theta in the teacher case
        save_dict['inital_theta'] = theta.data.clone().tolist()

        for num_update in range(number_update):
            states, actions, tasks = trajectory(
                agent, phi, true_theta, env, arg.NUM_EP)
            result = single_theta_inverse(
                true_theta, phi, arg, env, agent,
                states, actions, tasks,
                filename, num_thetas, initial_theta=theta)
            save_dict['theta_estimations'].append(result.tolist())
            # checkpoint after every update
            if filename is None:
                # no filename given: build a default name from the run settings
                savename = ('inverse_data/'
                            + "EP" + str(arg.NUM_EP)
                            + "updates" + str(number_update)
                            + "sample" + str(arg.NUM_SAMPLES)
                            + "IT" + str(arg.NUM_IT) + '.pkl')
                torch.save(save_dict, savename)
            elif filename[-4:] == '.pkl':
                torch.save(save_dict, filename)
            else:
                torch.save(save_dict, filename + '.pkl')
    print(result)
    print('done')
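# Usage sketch (assumption): run_inverse expects a dict carrying 'true_theta',
# 'phi', and at least one entry in 'theta_estimations'. The parameter values
# below are hypothetical placeholders, not taken from the original experiments.
if __name__ == '__main__':
    data = {
        'true_theta': [0.4, 0.4, 0.1, 0.1, 0.1, 0.9, 0.9, 0.1, 0.1],
        'phi': [0.4, 0.4, 0.1, 0.1, 0.1, 0.9, 0.9, 0.1, 0.1],
        'theta_estimations': [[0.5, 0.5, 0.2, 0.2, 0.2, 0.8, 0.8, 0.2, 0.2]],
    }
    run_inverse(data=data, filename='test_inverse')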
import time
from stable_baselines.ddpg.policies import LnMlpPolicy, MlpPolicy
# from stable_baselines.common.policies import MlpPolicy
from stable_baselines import DDPG
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise

from FireflyEnv import ffenv_new_cord
from Config import Config
from reward_functions import reward_singleff

arg = Config()
goal_radius_range = [0.1, 0.25]

env = ffenv_new_cord.FireflyAgentCenter(
    arg,
    # {'goal_radius_range': goal_radius_range,
    {
        'reward_function': reward_singleff.belief_reward,
        'max_distance': 0.5,
    })

# the original fragment was truncated mid-call; the closing parenthesis and
# env=env (attaching the freshly built env to the loaded model) are assumptions
model = DDPG.load(
    'DDPG_reward0.97_500000_4 16 20 47',
    env=env,
    tensorboard_log="./DDPG_tb/",
    full_tensorboard_log=False,
    batch_size=512,
    buffer_size=int(1e5),
    # gamma=0.99,
    # memory_policy=None,
    # eval_env=None,
    # nb_train_steps=100,
    # nb_rollout_steps=100,
)
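# Continuation sketch (assumption): the fragment ends after loading, so the
# usual next step with stable-baselines is to keep training on the attached
# env and save a new checkpoint. Step count and filename are hypothetical.
model.learn(total_timesteps=500000)
model.save('DDPG_new_cord_continued_' + time.strftime('%m_%d_%H_%M'))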