class Agent():
    """DDPG agent (PyTorch): deterministic actor-critic with target networks,
    a replay buffer, and Ornstein-Uhlenbeck exploration noise."""

    def __init__(self, alpha, beta, input_dims, tau, n_actions, gamma=0.99,
                 max_size=50000, fc1_dims=400, fc2_dims=300, batch_size=32):
        # alpha/beta: actor/critic learning rates; tau: soft-update factor.
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.alpha = alpha
        self.beta = beta
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        # Temporally correlated exploration noise centered at zero.
        self.noise = OUActionNoise(mu=np.zeros(n_actions))
        self.actor = ActorNetwork(alpha, input_dims, fc1_dims, fc2_dims,
                                  n_actions=n_actions, name='actor')
        self.critic = CriticNetwork(beta, input_dims, fc1_dims, fc2_dims,
                                    n_actions=n_actions, name='critic')
        self.target_actor = ActorNetwork(alpha, input_dims, fc1_dims,
                                         fc2_dims, n_actions=n_actions,
                                         name='target_actor')
        self.target_critic = CriticNetwork(beta, input_dims, fc1_dims,
                                           fc2_dims, n_actions=n_actions,
                                           name='target_critic')
        # tau=1 performs a hard copy so the targets start identical to the
        # online networks.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Return the deterministic policy action plus OU exploration noise."""
        # eval() so batch-norm layers use running statistics rather than
        # updating them from this single observation.
        self.actor.eval()
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        mu = self.actor.forward(state).to(self.actor.device)
        # Add exploration noise to the greedy action.
        mu_prime = mu + T.tensor(self.noise(),
                                 dtype=T.float).to(self.actor.device)
        self.actor.train()
        return mu_prime.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, state_, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, state_, done)

    def save_models(self):
        """Checkpoint all four networks."""
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        """Restore all four networks from their checkpoints."""
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()

    def learn(self):
        """One DDPG step: critic regression toward the bootstrapped target,
        actor update via the deterministic policy gradient, then a soft
        target-network update. No-op until the buffer holds a full batch."""
        if self.memory.mem_cntr < self.batch_size:
            return
        states, actions, rewards, states_, done = self.memory.sample_buffer(
            self.batch_size)
        states = T.tensor(states, dtype=T.float).to(self.actor.device)
        states_ = T.tensor(states_, dtype=T.float).to(self.actor.device)
        actions = T.tensor(actions, dtype=T.float).to(self.actor.device)
        rewards = T.tensor(rewards, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        target_actions = self.target_actor.forward(states_)
        critic_value_ = self.target_critic.forward(states_, target_actions)
        critic_value = self.critic.forward(states, actions)
        # Terminal next-states contribute no future value.
        critic_value_[done] = 0.0
        critic_value_ = critic_value_.view(-1)
        target = rewards + self.gamma * critic_value_
        target = target.view(self.batch_size, 1)
        self.critic.optimizer.zero_grad()
        critic_loss = F.mse_loss(target, critic_value)
        critic_loss.backward()
        self.critic.optimizer.step()
        self.actor.optimizer.zero_grad()
        # Maximize Q(s, pi(s)) by minimizing its negation.
        actor_loss = -self.critic.forward(states, self.actor.forward(states))
        actor_loss = T.mean(actor_loss)
        actor_loss.backward()
        self.actor.optimizer.step()
        # tau=None -> the soft update below uses self.tau.
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        """Polyak-average online parameters into the target networks
        (a hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        actor_params = self.actor.named_parameters()
        critic_params = self.critic.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()
        critic_state_dict = dict(critic_params)
        actor_state_dict = dict(actor_params)
        target_critic_state_dict = dict(target_critic_params)
        target_actor_state_dict = dict(target_actor_params)
        for name in critic_state_dict:
            critic_state_dict[name] = tau * critic_state_dict[name].clone() + \
                (1 - tau) * target_critic_state_dict[name].clone()
        for name in actor_state_dict:
            actor_state_dict[name] = tau * actor_state_dict[name].clone() + \
                (1 - tau) * target_actor_state_dict[name].clone()
        self.target_critic.load_state_dict(critic_state_dict)
        self.target_actor.load_state_dict(actor_state_dict)
class Agent:
    """SAC agent (TensorFlow/Keras): stochastic actor, twin critics, and a
    state-value network with a soft-updated target copy."""

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=[8], env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        # alpha/beta: actor / critic-and-value learning rates;
        # reward_scale weights rewards against the entropy term.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.actor = ActorNetwork(n_actions=n_actions, name='actor',
                                  max_action=env.action_space.high)
        self.critic_1 = CriticNetwork(n_actions=n_actions, name='critic_1')
        self.critic_2 = CriticNetwork(n_actions=n_actions, name='critic_2')
        self.value = ValueNetwork(name='value')
        self.target_value = ValueNetwork(name='target_value')
        self.actor.compile(optimizer=Adam(learning_rate=alpha))
        self.critic_1.compile(optimizer=Adam(learning_rate=beta))
        self.critic_2.compile(optimizer=Adam(learning_rate=beta))
        self.value.compile(optimizer=Adam(learning_rate=beta))
        self.target_value.compile(optimizer=Adam(learning_rate=beta))
        self.scale = reward_scale
        # Hard copy: target value network starts equal to the online one.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Sample a stochastic action from the policy for one observation."""
        state = tf.convert_to_tensor([observation])
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions[0]

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value-network weights into the target value
        network (hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        weights = []
        targets = self.target_value.weights
        for i, weight in enumerate(self.value.weights):
            weights.append(weight * tau + targets[i] * (1 - tau))
        self.target_value.set_weights(weights)

    def save_models(self):
        """Save weights for all five networks."""
        print('... saving models ...')
        self.actor.save_weights(self.actor.checkpoint_file)
        self.critic_1.save_weights(self.critic_1.checkpoint_file)
        self.critic_2.save_weights(self.critic_2.checkpoint_file)
        self.value.save_weights(self.value.checkpoint_file)
        self.target_value.save_weights(self.target_value.checkpoint_file)

    def load_models(self):
        """Load weights for all five networks."""
        print('... loading models ...')
        self.actor.load_weights(self.actor.checkpoint_file)
        self.critic_1.load_weights(self.critic_1.checkpoint_file)
        self.critic_2.load_weights(self.critic_2.checkpoint_file)
        self.value.load_weights(self.value.checkpoint_file)
        self.target_value.load_weights(self.target_value.checkpoint_file)

    def learn(self):
        """One SAC step: update value net, actor, and both critics, then
        soft-update the target value net. No-op until the buffer holds a
        full batch."""
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        states = tf.convert_to_tensor(state, dtype=tf.float32)
        states_ = tf.convert_to_tensor(new_state, dtype=tf.float32)
        rewards = tf.convert_to_tensor(reward, dtype=tf.float32)
        actions = tf.convert_to_tensor(action, dtype=tf.float32)
        # --- value network: V(s) regresses toward Q(s,a') - log pi(a'|s) ---
        with tf.GradientTape() as tape:
            value = tf.squeeze(self.value(states), 1)
            value_ = tf.squeeze(self.target_value(states_), 1)
            current_policy_actions, log_probs = self.actor.sample_normal(
                states, reparameterize=False)
            log_probs = tf.squeeze(log_probs, 1)
            q1_new_policy = self.critic_1(states, current_policy_actions)
            q2_new_policy = self.critic_2(states, current_policy_actions)
            # Min of the twin critics reduces overestimation bias.
            critic_value = tf.squeeze(
                tf.math.minimum(q1_new_policy, q2_new_policy), 1)
            value_target = critic_value - log_probs
            value_loss = 0.5 * keras.losses.MSE(value, value_target)
        value_network_gradient = tape.gradient(value_loss,
                                               self.value.trainable_variables)
        self.value.optimizer.apply_gradients(
            zip(value_network_gradient, self.value.trainable_variables))
        # --- actor: minimize log pi(a|s) - Q(s,a) (entropy-regularized) ---
        with tf.GradientTape() as tape:
            # Reparameterized sampling so gradients flow through the action.
            new_policy_actions, log_probs = self.actor.sample_normal(
                states, reparameterize=True)
            log_probs = tf.squeeze(log_probs, 1)
            q1_new_policy = self.critic_1(states, new_policy_actions)
            q2_new_policy = self.critic_2(states, new_policy_actions)
            critic_value = tf.squeeze(
                tf.math.minimum(q1_new_policy, q2_new_policy), 1)
            actor_loss = log_probs - critic_value
            actor_loss = tf.math.reduce_mean(actor_loss)
        actor_network_gradient = tape.gradient(actor_loss,
                                               self.actor.trainable_variables)
        self.actor.optimizer.apply_gradients(
            zip(actor_network_gradient, self.actor.trainable_variables))
        # --- twin critics regress toward the scaled soft Bellman target ---
        # persistent=True: this tape is queried twice (once per critic).
        # NOTE(review): value_ was computed under the first tape; here it is
        # only used as a constant in the target, which is the intended SAC
        # semantics.
        with tf.GradientTape(persistent=True) as tape:
            q_hat = self.scale * reward + self.gamma * value_ * (1 - done)
            q1_old_policy = tf.squeeze(self.critic_1(state, action), 1)
            q2_old_policy = tf.squeeze(self.critic_2(state, action), 1)
            critic_1_loss = 0.5 * keras.losses.MSE(q1_old_policy, q_hat)
            critic_2_loss = 0.5 * keras.losses.MSE(q2_old_policy, q_hat)
        critic_1_network_gradient = tape.gradient(
            critic_1_loss, self.critic_1.trainable_variables)
        critic_2_network_gradient = tape.gradient(
            critic_2_loss, self.critic_2.trainable_variables)
        self.critic_1.optimizer.apply_gradients(
            zip(critic_1_network_gradient, self.critic_1.trainable_variables))
        self.critic_2.optimizer.apply_gradients(
            zip(critic_2_network_gradient, self.critic_2.trainable_variables))
        self.update_network_parameters()
class Agent:
    """DDPG agent (TensorFlow/Keras): deterministic actor-critic with target
    networks and Gaussian exploration noise."""

    def __init__(self, input_dims, alpha=0.001, beta=0.002, env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 fc1=400, fc2=300, batch_size=64, noise=0.1):
        # alpha/beta: actor/critic learning rates; noise: stddev of the
        # Gaussian exploration noise added at action-selection time.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.noise = noise
        self.max_action = env.action_space.high[0]
        self.min_action = env.action_space.low[0]
        self.actor = ActorNetwork(n_actions=n_actions, name='actor')
        self.critic = CriticNetwork(name='critic')
        self.target_actor = ActorNetwork(n_actions=n_actions,
                                         name='target_actor')
        self.target_critic = CriticNetwork(name='target_critic')
        self.actor.compile(optimizer=Adam(learning_rate=alpha))
        self.critic.compile(optimizer=Adam(learning_rate=beta))
        self.target_actor.compile(optimizer=Adam(learning_rate=alpha))
        self.target_critic.compile(optimizer=Adam(learning_rate=beta))
        # Hard copy: targets start identical to the online networks.
        self.update_network_parameters(tau=1)

    def update_network_parameters(self, tau=None):
        """Polyak-average online weights into the target networks
        (hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        weights = []
        targets = self.target_actor.weights
        for i, weight in enumerate(self.actor.weights):
            weights.append(weight * tau + targets[i] * (1 - tau))
        self.target_actor.set_weights(weights)
        weights = []
        targets = self.target_critic.weights
        for i, weight in enumerate(self.critic.weights):
            weights.append(weight * tau + targets[i] * (1 - tau))
        self.target_critic.set_weights(weights)

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def save_models(self):
        """Save weights for all four networks."""
        print('... saving models ...')
        self.actor.save_weights(self.actor.checkpoint_file)
        self.target_actor.save_weights(self.target_actor.checkpoint_file)
        self.critic.save_weights(self.critic.checkpoint_file)
        self.target_critic.save_weights(self.target_critic.checkpoint_file)

    def load_models(self):
        """Load weights for all four networks."""
        print('... loading models ...')
        self.actor.load_weights(self.actor.checkpoint_file)
        self.target_actor.load_weights(self.target_actor.checkpoint_file)
        self.critic.load_weights(self.critic.checkpoint_file)
        self.target_critic.load_weights(self.target_critic.checkpoint_file)

    def choose_action(self, observation, evaluate=False):
        """Return the policy action, adding Gaussian noise unless evaluating.

        The result is clipped to the env's action bounds."""
        state = tf.convert_to_tensor([observation], dtype=tf.float32)
        actions = self.actor(state)
        if not evaluate:
            actions += tf.random.normal(shape=[self.n_actions],
                                        mean=0.0, stddev=self.noise)
        # NOTE: if the env has an action range beyond [-1, 1], the actor
        # output would need scaling by max_action before clipping.
        actions = tf.clip_by_value(actions, self.min_action, self.max_action)
        return actions[0]

    def learn(self):
        """One DDPG step: critic regression toward the bootstrapped target,
        actor update via the deterministic policy gradient, then a soft
        target update. No-op until the buffer holds a full batch."""
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        states = tf.convert_to_tensor(state, dtype=tf.float32)
        states_ = tf.convert_to_tensor(new_state, dtype=tf.float32)
        rewards = tf.convert_to_tensor(reward, dtype=tf.float32)
        actions = tf.convert_to_tensor(action, dtype=tf.float32)
        with tf.GradientTape() as tape:
            target_actions = self.target_actor(states_)
            critic_value_ = tf.squeeze(
                self.target_critic(states_, target_actions), 1)
            critic_value = tf.squeeze(self.critic(states, actions), 1)
            # (1 - done) zeroes the bootstrap term for terminal transitions.
            target = rewards + self.gamma * critic_value_ * (1 - done)
            critic_loss = keras.losses.MSE(target, critic_value)
        critic_network_gradient = tape.gradient(
            critic_loss, self.critic.trainable_variables)
        self.critic.optimizer.apply_gradients(
            zip(critic_network_gradient, self.critic.trainable_variables))
        with tf.GradientTape() as tape:
            new_policy_actions = self.actor(states)
            # Maximize Q(s, pi(s)) by minimizing its negation.
            actor_loss = -self.critic(states, new_policy_actions)
            actor_loss = tf.math.reduce_mean(actor_loss)
        actor_network_gradient = tape.gradient(actor_loss,
                                               self.actor.trainable_variables)
        self.actor.optimizer.apply_gradients(
            zip(actor_network_gradient, self.actor.trainable_variables))
        self.update_network_parameters()
class Agent_sm():
    """SAC agent (PyTorch) with an extra action-smoothness training mode.

    learn() performs a standard soft actor-critic update (value network,
    entropy-regularized actor, twin critics). learn_sm() trains only the
    actor: it maximizes the critic value at both the sampled states and
    their successors while penalizing the squared difference between the
    actions chosen at consecutive states (weighted by sm_reg).
    """

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=8, env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        # Fix: honor the gamma argument (was hard-coded to 0.99, silently
        # ignoring the parameter). Default is unchanged.
        self.gamma = gamma
        self.tau = tau
        # NOTE(review): 'memeory' is a typo but is kept — it is a public
        # attribute name that external code may rely on.
        self.memeory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.actor = ActorNetwork(alpha, input_dims, env.action_space.high,
                                  n_actions=n_actions)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims,
                                         name='target_value')
        # reward_scale weights rewards against the entropy term.
        self.scale = reward_scale
        # Hard copy: target value net starts identical to the online one.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Sample a stochastic action from the policy for one observation."""
        state = torch.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memeory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value-net params into the target value network
        (hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()
        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)
        for name in target_value_dict:
            target_value_dict[name] = tau * value_dict[name].clone() + \
                (1 - tau) * target_value_dict[name].clone()
        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        """Checkpoint all five networks."""
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        """One SAC step. Returns (0, value_loss, actor_loss, critic_loss),
        or None while the buffer holds fewer than batch_size transitions."""
        if self.memeory.mem_cntr < self.batch_size:
            return
        states, new_states, actions, rewards, dones = \
            self.memeory.sample_buffer(self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)
        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        # Terminal next-states contribute no future value.
        new_states_value[dones] = 0.0
        # --- value network: V(s) regresses toward Q(s,a') - log pi(a'|s) ---
        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        # Min of twin critics reduces overestimation bias.
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()
        # --- actor: minimize log pi(a|s) - Q(s,a), reparameterized ---
        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()
        # --- twin critics regress toward the scaled soft Bellman target ---
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(states, actions).view(-1)
        q2_old_policy = self.critic_2(states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.update_network_parameters()
        return 0, value_loss, actor_loss, critic_loss

    def learn_sm(self, sm_reg=1):
        """Actor-only smoothness update. Returns (0, 0, actor_loss, 0), or
        None while the buffer holds fewer than batch_size transitions."""
        if self.memeory.mem_cntr < self.batch_size:
            return
        states, new_states, actions, rewards, dones = \
            self.memeory.sample_buffer(self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)
        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        new_states_value[dones] = 0.0
        # Critic value at the batch states under the current policy.
        action, _ = self.actor.sample_normal(states, reparameterize=True)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        # Critic value at the successor states under the current policy.
        action_next, _ = self.actor.sample_normal(new_states,
                                                  reparameterize=True)
        q1_new_policy = self.critic_1(new_states, action_next)
        q2_new_policy = self.critic_2(new_states, action_next)
        critic_value_next = torch.min(q1_new_policy, q2_new_policy)
        # Fix: was `critic_value.view(-1)`, which discarded the next-state
        # critic value and double-counted the current-state one.
        critic_value_next = critic_value_next.view(-1)
        # Maximize Q at both states; penalize action changes between
        # consecutive states for smoothness.
        actor_loss = -(critic_value + critic_value_next) + sm_reg * F.mse_loss(
            action, action_next)
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()
        return 0, 0, actor_loss, 0
class Agent():
    """SAC agent (PyTorch): stochastic actor, twin critics, and a state-value
    network with a soft-updated target copy."""

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=[8], env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        # alpha/beta: actor / critic-and-value learning rates;
        # reward_scale weights rewards against the entropy term.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions,
                                  name='actor',
                                  max_action=env.action_space.high)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims,
                                         name='target_value')
        self.scale = reward_scale
        # Hard copy: target value net starts identical to the online one.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Sample a stochastic action from the policy for one observation."""
        state = T.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value-net params into the target value network
        (hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()
        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)
        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() + \
                (1-tau)*target_value_state_dict[name].clone()
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        """Checkpoint all five networks."""
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        """One SAC step: update value net, actor, and both critics, then
        soft-update the target value net. No-op until the buffer holds a
        full batch."""
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)
        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        # Terminal next-states contribute no future value.
        value_[done] = 0.0
        # --- value network: V(s) regresses toward Q(s,a') - log pi(a'|s) ---
        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        # Min of the twin critics reduces overestimation bias.
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()
        # --- actor: minimize log pi(a|s) - Q(s,a), reparameterized ---
        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()
        # --- twin critics regress toward the scaled soft Bellman target ---
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic_1.forward(state, action).view(-1)
        q2_old_policy = self.critic_2.forward(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.update_network_parameters()
class Agent():
    """SAC agent (PyTorch): stochastic actor, twin critics, and a state-value
    network with a soft-updated target copy."""

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=None, env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        # Fix: avoid a mutable default argument; [8] remains the effective
        # default, so existing callers are unaffected.
        if input_dims is None:
            input_dims = [8]
        # alpha/beta: actor / critic-and-value learning rates;
        # reward_scale weights rewards against the entropy term.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions,
                                  name='actor',
                                  max_action=env.action_space.high)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims,
                                         name='target_value')
        self.scale = reward_scale
        # tau=1 makes the target value network an exact copy of the online
        # value network at startup.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Sample a stochastic action from the policy for one observation."""
        state = T.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value-net params into the target value network.

        tau=1 gives an exact copy; tau=None gives a soft update with
        self.tau."""
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()
        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)
        for name in value_state_dict:
            value_state_dict[name] = tau * value_state_dict[name].clone() + \
                (1 - tau) * target_value_state_dict[name].clone()
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        """Checkpoint all five networks."""
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        """One SAC step: update value net, actor, and both critics, then
        soft-update the target value net. No-op until the buffer holds a
        full batch."""
        if self.memory.mem_cntr < self.batch_size:
            return
        # Fix: the original placed a comment after the line-continuation
        # backslash, which is a SyntaxError; the comment now lives here.
        # Sample a batch of transitions and move them onto tensors.
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)
        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        # Boolean mask: terminal next-states contribute no future value.
        value_[done] = 0.0
        # --- value network update ---
        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        # Take the lower of the two critics' Q-values (reduces
        # overestimation bias).
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        self.value.optimizer.zero_grad()
        # Soft value target: Q minus log-prob, i.e. the critic value plus
        # an entropy bonus — not the raw critic value alone.
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()
        # --- actor update (reparameterized sampling) ---
        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()
        # --- twin critic update ---
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        # The reward scaling factor accounts for the entropy term and
        # encourages exploration.
        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic_1.forward(state, action).view(-1)
        q2_old_policy = self.critic_2.forward(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.update_network_parameters()
class Agent_2():
    """Deterministic actor-critic agent (PyTorch) operating on a VAE latent
    representation of the observation.

    learn() first trains the VAE on raw states (KL + reconstruction), then
    runs value/actor/critic updates on the encoded latent states.
    """

    def __init__(self, alpha=0.00005, beta=0.00005, input_dims=5, env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        # Fix: honor the gamma argument (was hard-coded to 0.99, silently
        # ignoring the parameter). Default is unchanged.
        self.gamma = gamma
        self.tau = tau
        # NOTE(review): 'memeory' is a typo, kept as a public attribute name.
        self.memeory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        # Networks consume the VAE latent code, not the raw observation.
        # NOTE(review): latent size is hard-coded here and must match
        # LinearVAE's latent dimension — confirm against its definition.
        latent_dims = 10
        self.actor = ActorNetwork_2(alpha, latent_dims,
                                    env.action_space.high,
                                    n_actions=n_actions)
        # NOTE(review): 'critic__det_2' (double underscore) is kept as-is:
        # it is a checkpoint name, and changing it would orphan existing
        # checkpoint files.
        self.critic_1 = CriticNetwork(beta, latent_dims, n_actions,
                                      name='critic_det_1')
        self.critic_2 = CriticNetwork(beta, latent_dims, n_actions,
                                      name='critic__det_2')
        self.value = ValueNetwork(beta, latent_dims, name='value_det')
        self.target_value = ValueNetwork(beta, latent_dims,
                                         name='target_value_det')
        self.VAE = LinearVAE()
        self.scale = reward_scale
        # Hard copy: target value net starts identical to the online one.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Encode the observation with the VAE, then return the actor's
        deterministic action for the latent state."""
        state = torch.Tensor([observation]).to(self.actor.device)
        state_latent = self.VAE.sample_normal(state)
        actions = self.actor(state_latent)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        """Store one transition (raw observations) in the replay buffer."""
        self.memeory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value-net params into the target value network
        (hard copy when tau=1)."""
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()
        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)
        for name in target_value_dict:
            target_value_dict[name] = tau * value_dict[name].clone() + \
                (1 - tau) * target_value_dict[name].clone()
        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        """Checkpoint the actor, critics, and value networks."""
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        """Restore the actor, critics, and value networks."""
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        """One training step: VAE, then value/actor/critics on latents.

        Returns (final_loss, value_loss, actor_loss, critic_loss), or None
        while the buffer holds fewer than batch_size transitions."""
        if self.memeory.mem_cntr < self.batch_size:
            return
        states, new_states, actions, rewards, dones = \
            self.memeory.sample_buffer(self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)
        # --- VAE: KL divergence + reconstruction loss ---
        # (log_probs is returned by the VAE but deliberately not part of
        # the loss here.)
        reconstruction, mu, logvar, log_probs = self.VAE(states)
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        reconstruction_loss = F.mse_loss(reconstruction, states)
        final_loss = KLD + reconstruction_loss
        self.VAE.optimizer.zero_grad()
        final_loss.backward(retain_graph=True)
        self.VAE.optimizer.step()
        # --- encode states; value network regresses toward min-Q ---
        latent_states = self.VAE.sample_normal(states)
        states_value = self.value(latent_states).view(-1)
        new_latent_states = self.VAE.sample_normal(new_states)
        new_states_value = self.target_value(new_latent_states).view(-1)
        # Terminal next-states contribute no future value.
        new_states_value[dones] = 0.0
        action = self.actor(latent_states)
        q1_new_policy = self.critic_1(latent_states, action)
        q2_new_policy = self.critic_2(latent_states, action)
        # Min of twin critics reduces overestimation bias.
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        self.value.optimizer.zero_grad()
        value_target = critic_value
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()
        # --- actor: maximize Q(s, pi(s)) ---
        actor_loss = -critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()
        # --- twin critics regress toward the scaled Bellman target ---
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(latent_states, actions).view(-1)
        q2_old_policy = self.critic_2(latent_states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.update_network_parameters()
        return final_loss, value_loss, actor_loss, critic_loss
class Agent():
    """Soft Actor-Critic agent for continuous action spaces.

    Uses a stochastic actor, twin Q critics, a state-value network and a
    Polyak-averaged target value network.  Also provides episode helpers
    (train_on_env, generate_session).
    """

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=8, env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256,
                 reward_scale=2):
        """alpha/beta: actor/critic learning rates; reward_scale weights the
        reward against the entropy term.  layer1_size/layer2_size are kept
        for API compatibility but are not forwarded to the networks (they
        configure their own sizes)."""
        # Bug fix: was hard-coded `self.gamma = 0.99`, silently ignoring the
        # gamma argument.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        # Backward-compatible alias for the historical misspelling, in case
        # external code reads agent.memeory.
        self.memeory = self.memory
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.actor = ActorNetwork(alpha, input_dims, env.action_space.high,
                                  n_actions=n_actions)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')
        self.scale = reward_scale
        # tau=1 performs a hard copy so value and target_value start equal.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """Sample one action from the current policy (no reparameterization)."""
        state = torch.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        """Store a single transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average value weights into target_value.

        tau=None uses the agent's configured tau; tau=1 is a hard copy.
        """
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()
        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)
        for name in target_value_dict:
            target_value_dict[name] = tau * value_dict[name].clone() + \
                (1 - tau) * target_value_dict[name].clone()
        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        """Checkpoint all five networks."""
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        """One SAC update: value net, actor, twin critics, then target value.

        Returns (value_loss, actor_loss, critic_loss) tensors, or None while
        the buffer holds fewer than batch_size transitions.
        """
        if self.memory.mem_cntr < self.batch_size:
            return
        states, new_states, actions, rewards, dones = self.memory.sample_buffer(
            self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)

        # Value update: V(s) regresses toward E[min Q(s,a) - log pi(a|s)].
        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        new_states_value[dones] = 0.0  # no bootstrap past terminal states
        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        # Actor update uses the reparameterized sample so gradients flow
        # through the sampled action.
        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        # Twin-critic update toward the scaled-reward TD target.
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(states, actions).view(-1)
        q2_old_policy = self.critic_2(states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
        return value_loss, actor_loss, critic_loss

    def train_on_env(self, env):
        """Run one episode on env, learning at every step.

        Returns the episode's total reward.
        """
        rewards = []
        done = False
        observation = env.reset()
        while not done:
            action = self.choose_action(observation)
            observation_, reward, done, _ = env.step(action)
            self.remember(observation, action, reward, observation_, done)
            self.learn()
            observation = observation_
            rewards.append(reward)
        return np.sum(rewards)

    def generate_session(self, env, t_max=1000):
        """Roll out the current policy for up to t_max steps without learning.

        Returns arrays of states, cumulative log-probabilities, first action
        components and rewards.  Assumes observations are numpy arrays
        (uses s.tolist()).
        """
        states, traj_probs, actions, rewards = [], [], [], []
        s = env.reset()
        q_t = 0
        for t in range(t_max):
            state = torch.Tensor([s]).to(self.actor.device)
            action, log_probs = self.actor.sample_normal(state,
                                                         reparameterize=False)
            action = action.cpu().detach().numpy()[0]
            new_s, r, done, info = env.step(action)
            log_probs = log_probs.cpu().detach().numpy()[0]
            # Running sum of log-probabilities along the trajectory.
            q_t += log_probs[0]
            states.append(s.tolist())
            traj_probs.append(q_t)
            actions.append(action[0])
            rewards.append(r)
            s = new_s
            if done:
                break
        return np.array(states), np.array(traj_probs), np.array(
            actions), np.array(rewards)
class Agent(object):
    # DDPG agent: deterministic actor/critic pairs with target copies and
    # Ornstein-Uhlenbeck exploration noise; soft target updates via tau.
    def __init__(self, alpha, beta, input_dims, action_bound, tau, env,
                 gamma=0.99, n_actions=2, max_size=1000000, layer1_size=400,
                 layer2_size=300, batch_size=64):
        # alpha/beta: actor/critic learning rates.  action_bound scales the
        # network output to the environment's action range in choose_action.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.action_bound = action_bound
        self.actor = ActorNetwork(alpha, input_dims, layer1_size, layer2_size,
                                  n_actions=n_actions, name='Actor')
        self.critic = CriticNetwork(beta, input_dims, layer1_size, layer2_size,
                                    n_actions=n_actions, name='Critic')
        self.target_actor = ActorNetwork(alpha, input_dims, layer1_size,
                                         layer2_size, n_actions=n_actions,
                                         name='TargetActor')
        self.target_critic = CriticNetwork(beta, input_dims, layer1_size,
                                           layer2_size, n_actions=n_actions,
                                           name='TargetCritic')
        self.noise = OUActionNoise(mu=np.zeros(n_actions))
        # tau=1 performs a hard copy so the targets start identical to the
        # online networks.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # eval() so normalization layers use running statistics for this
        # single observation instead of updating them.
        self.actor.eval()
        observation = T.tensor(observation,
                               dtype=T.float).to(self.actor.device)
        mu = self.actor.forward(observation).to(self.actor.device)
        # Add OU exploration noise to the deterministic action.
        mu_prime = mu + T.tensor(self.noise(),
                                 dtype=T.float).to(self.actor.device)
        self.actor.train()
        # Scale the noisy action into the environment's action range.
        return (mu_prime * T.tensor(self.action_bound)).cpu().detach().numpy()

    def remember(self, state, action, reward, new_state, done):
        # Store one transition in the replay buffer.
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        # Skip updates until a full minibatch is available.
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        reward = T.tensor(reward, dtype=T.float).to(self.critic.device)
        done = T.tensor(done).to(self.critic.device)
        new_state = T.tensor(new_state, dtype=T.float).to(self.critic.device)
        action = T.tensor(action, dtype=T.float).to(self.critic.device)
        state = T.tensor(state, dtype=T.float).to(self.critic.device)
        # eval() during target computation so batch-norm statistics are not
        # polluted by target forward passes.
        self.target_actor.eval()
        self.target_critic.eval()
        self.critic.eval()
        target_actions = self.target_actor.forward(new_state)
        critic_value_ = self.target_critic.forward(new_state, target_actions)
        critic_value = self.critic.forward(state, action)
        target = []
        for j in range(self.batch_size):
            # NOTE(review): multiplies by done[j] rather than (1 - done[j]).
            # This is only correct if the replay buffer stores the terminal
            # flag already inverted (1 - done) — verify against ReplayBuffer.
            target.append(reward[j] + self.gamma * critic_value_[j] * done[j])
        target = T.tensor(target).to(self.critic.device)
        target = target.view(self.batch_size, 1)
        # Critic step: regress Q(s, a) toward the bootstrapped target.
        self.critic.train()
        self.critic.optimizer.zero_grad()
        critic_loss = F.mse_loss(target, critic_value)
        critic_loss.backward()
        self.critic.optimizer.step()
        self.critic.eval()
        # Actor step: maximize Q(s, pi(s)) by minimizing its negation.
        self.actor.optimizer.zero_grad()
        mu = self.actor.forward(state)
        self.actor.train()
        actor_loss = -self.critic.forward(state, mu)
        actor_loss = T.mean(actor_loss)
        actor_loss.backward()
        self.actor.optimizer.step()
        # Soft-update the target networks.
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        # Polyak update: target <- tau * online + (1 - tau) * target.
        # tau=None uses the agent's configured tau; tau=1 is a hard copy.
        if tau is None:
            tau = self.tau
        actor_params = self.actor.named_parameters()
        critic_params = self.critic.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()
        critic_state_dict = dict(critic_params)
        actor_state_dict = dict(actor_params)
        target_critic_dict = dict(target_critic_params)
        target_actor_dict = dict(target_actor_params)
        for name in critic_state_dict:
            critic_state_dict[name] = tau*critic_state_dict[name].clone() + \
                (1-tau)*target_critic_dict[name].clone()
        self.target_critic.load_state_dict(critic_state_dict)
        for name in actor_state_dict:
            actor_state_dict[name] = tau*actor_state_dict[name].clone() + \
                (1-tau)*target_actor_dict[name].clone()
        self.target_actor.load_state_dict(actor_state_dict)
        # Debugging helper left by the original author (intentionally inert).
        """
        #Verify that the copy assignment worked correctly
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()
        critic_state_dict = dict(target_critic_params)
        actor_state_dict = dict(target_actor_params)
        print('\nActor Networks', tau)
        for name, param in self.actor.named_parameters():
            print(name, T.equal(param, actor_state_dict[name]))
        print('\nCritic Networks', tau)
        for name, param in self.critic.named_parameters():
            print(name, T.equal(param, critic_state_dict[name]))
        input()
        """

    def save_models(self):
        # Checkpoint all four networks.
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        # Restore all four networks from their checkpoints.
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()

    def check_actor_params(self):
        # Interactive debugging utility: compares current weights against
        # snapshots and blocks on input().
        # NOTE(review): self.original_actor / self.original_critic are never
        # assigned in __init__; calling this raises AttributeError unless a
        # caller sets them externally first — confirm intended usage.
        current_actor_params = self.actor.named_parameters()
        current_actor_dict = dict(current_actor_params)
        original_actor_dict = dict(self.original_actor.named_parameters())
        original_critic_dict = dict(self.original_critic.named_parameters())
        current_critic_params = self.critic.named_parameters()
        current_critic_dict = dict(current_critic_params)
        print('Checking Actor parameters')
        for param in current_actor_dict:
            print(
                param,
                T.equal(original_actor_dict[param],
                        current_actor_dict[param]))
        print('Checking critic parameters')
        for param in current_critic_dict:
            print(
                param,
                T.equal(original_critic_dict[param],
                        current_critic_dict[param]))
        input()
class Agent:
    """DQN agent: epsilon-greedy behavior policy, replay buffer, and a
    periodically synced frozen target network."""

    def __init__(self, lr=0.003, input_dims=[4], env=None, gamma=0.99,
                 n_actions=2, epsilon_greedy_start=0.5,
                 epsilon_greedy_decay=0.0002, max_size=1000000,
                 layer1_size=64, layer2_size=64, batch_size=128, writer=None):
        """writer is a TensorBoard-style summary writer used to log the loss
        (model_update calls writer.add_scalar unconditionally)."""
        self.env = env
        self.gamma = gamma
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.epsilon_greedy_start = epsilon_greedy_start
        self.epsilon_greedy_decay = epsilon_greedy_decay
        self.net = Net(lr, input_dims, n_actions=n_actions,
                       fc1_dims=layer1_size, fc2_dims=layer2_size, name='dqn')
        # Target network starts as an exact copy of the online net and is
        # kept in eval mode; it only changes via target_update().
        self.target_net = deepcopy(self.net)
        self.target_net.load_state_dict(self.net.state_dict())
        self.target_net.eval()
        self.criterion = F.smooth_l1_loss  # Huber loss
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.net.to(self.device)
        self.target_net.to(self.device)
        self.writer = writer

    def choose_action(self, state, timestep):
        """Epsilon-greedy action selection; epsilon decays linearly with
        timestep from epsilon_greedy_start."""
        epsilon = self.epsilon_greedy_start - \
            self.epsilon_greedy_decay * timestep
        if random.random() <= epsilon:
            return self.env.action_space.sample()
        state = torch.from_numpy(state).to(self.device, torch.float)
        action = self.net.forward(state).max(0)[1].item()
        return action

    def target_update(self):
        """Hard-sync the target network with the online network."""
        self.target_net.load_state_dict(self.net.state_dict())

    def model_update(self, timestep):
        """One gradient step on a replayed minibatch; logs the loss.

        Bug fix: the original multiplied the entire target
        (gamma * max_a' Q_target(s') + r) by (1 - terminal), which zeroed the
        final reward on terminal transitions.  The standard DQN target is
        r + gamma * max_a' Q_target(s') * (1 - terminal).
        """
        if len(self.memory) < self.batch_size:
            return
        states, actions, rewards, states_, terminals = self.memory.sample_buffer(
            self.batch_size)
        states = states.to(self.device)
        actions = actions.to(self.device)
        rewards = rewards.to(self.device)
        states_ = states_.to(self.device)
        terminals = terminals.to(self.device)
        # Q(s, a) for the actions actually taken.
        state_action_values = self.net.forward(states.to(torch.float))
        state_action_values = state_action_values.gather(
            1, actions[:, 0].unsqueeze(1).to(torch.long)).squeeze(1)
        with torch.no_grad():
            next_state_values = self.target_net(states_.to(
                torch.float)).max(1)[0].detach()
            # Zero only the bootstrap term (not the reward) at terminal states.
            next_state_values = next_state_values * (
                1 - terminals.to(torch.uint8))
        expected_action_values = rewards + self.gamma * next_state_values
        loss = self.criterion(state_action_values,
                              expected_action_values.to(torch.float))
        self.writer.add_scalar("loss", loss.item(), timestep)
        self.net.optimizer.zero_grad()
        loss.backward()
        # Gradient clipping to [-1, 1], as in the classic DQN setup.
        for param in self.net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.net.optimizer.step()

    def store_transition(self, state, action, reward, state_, done):
        """Store one transition in the replay buffer."""
        # NOTE(review): 'store_transtions' is presumably the (misspelled)
        # method name on this project's ReplayBuffer — verify before renaming.
        self.memory.store_transtions(state, action, reward, state_, done)
class Agent():
    # Discrete-action SAC-style agent with one actor and twin local/target
    # critics (no separate value network).  Hyperparameters come from the
    # project's Hyper/Constants modules.
    def __init__(self, input_dims, env, n_actions):
        self.memory = ReplayBuffer(input_dims)
        self.n_actions = n_actions
        self.actor_nn = ActorNetwork(input_dims,
                                     n_actions=n_actions,
                                     name=Constants.env_id + '_actor',
                                     max_action=env.action_space.n)
        self.critic_local_1_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_local_1')
        self.critic_local_2_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_local_2')
        self.critic_target_1_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_target_1')
        self.critic_target_2_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_target_2')

    def choose_action(self, observation):
        # Greedy action: returns the index of the most probable action.
        state = T.Tensor([observation]).to(Constants.device)
        _, max_probability_action = self.actor_nn.sample_action(state)
        return max_probability_action

    def remember(self, state, action, reward, new_state, done):
        # Store one transition in the replay buffer.
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        # Skip until a full batch is buffered.
        if self.memory.mem_cntr < Hyper.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample_buffer()
        reward = T.tensor(reward, dtype=T.float).to(Constants.device)
        done = T.tensor(done).to(Constants.device)
        next_state = T.tensor(next_state, dtype=T.float).to(Constants.device)
        state = T.tensor(state, dtype=T.float).to(Constants.device)
        action = T.tensor(action, dtype=T.float).to(Constants.device)
        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(next_state)
        with T.no_grad():
            action_logits1 = self.critic_target_1_nn(next_state)
            # NOTE(review): T.argmax returns action *indices*, not Q-values;
            # using the indices in the soft target looks wrong — the usual
            # discrete-SAC form uses the critic outputs themselves.  Confirm
            # before relying on this update.
            q1_new_policy = T.argmax(action_logits1, dim=1, keepdim=True)
            action_logits2 = self.critic_target_2_nn(next_state)
            q2_new_policy = T.argmax(action_logits2, dim=1, keepdim=True)
            q_value = T.min(q1_new_policy, q2_new_policy)
            # Expected soft value over the next-state action distribution.
            min_qf_next_target = action_probabilities * (
                q_value - Hyper.alpha * log_action_probabilities)
            min_qf_next_target_sum = min_qf_next_target.sum(
                dim=1).unsqueeze(-1)
            not_done = (1.0 - done * 1).unsqueeze(-1)
            next_q_value = reward.unsqueeze(
                -1) + not_done * Hyper.gamma * (min_qf_next_target_sum)
        # Q-values of the actions actually taken, per local critic.
        action_logits1 = self.critic_local_1_nn(state).gather(1, action.long())
        q_value1 = action_logits1.sum(dim=1).unsqueeze(-1)
        action_logits2 = self.critic_local_2_nn(state).gather(1, action.long())
        q_value2 = action_logits2.sum(dim=1).unsqueeze(-1)
        self.critic_local_1_nn.optimizer.zero_grad()
        self.critic_local_2_nn.optimizer.zero_grad()
        critic_1_loss = F.mse_loss(q_value1, next_q_value)
        critic_2_loss = F.mse_loss(q_value2, next_q_value)
        critic_1_loss.backward()
        critic_2_loss.backward()
        self.critic_local_1_nn.optimizer.step()
        self.critic_local_2_nn.optimizer.step()
        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(state)
        # -------------------------------------------------------------------
        # Calculates the loss for the actor. This loss includes the additional
        # entropy term
        # CHANGE0003 Soft state-value where actions are discrete
        self.actor_nn.optimizer.zero_grad()
        action_logits1 = self.critic_target_1_nn(state)
        # NOTE(review): same argmax-as-Q-value concern as above; also the
        # policy objective normally uses the *local* critics rather than the
        # target critics — verify against the reference implementation.
        q1_new_policy = T.argmax(action_logits1, dim=1, keepdim=True)
        action_logits2 = self.critic_target_2_nn(state)
        q2_new_policy = T.argmax(action_logits2, dim=1, keepdim=True)
        q_value = T.min(q1_new_policy, q2_new_policy)
        inside_term = Hyper.alpha * log_action_probabilities - q_value
        policy_loss = (action_probabilities * inside_term).sum(dim=1).mean()
        policy_loss.backward(retain_graph=True)
        self.actor_nn.optimizer.step()
        # Soft-update both target critics.
        self.update_q_weights()

    def update_q_weights(self):
        # Polyak-average each local critic into its target twin.
        local_1_parameters = self.critic_local_1_nn.named_parameters()
        local_2_parameters = self.critic_local_2_nn.named_parameters()
        target_1_parameters = self.critic_target_1_nn.named_parameters()
        target_2_parameters = self.critic_target_2_nn.named_parameters()
        self.update_network_parameters_line(target_1_parameters,
                                            local_1_parameters, Hyper.tau)
        self.update_network_parameters_line(target_2_parameters,
                                            local_2_parameters, Hyper.tau)

    def update_network_parameters_line(self, target_params, local_params, tau):
        # In-place Polyak update: target <- tau*local + (1-tau)*target.
        for target_param, local_param in zip(target_params, local_params):
            target_param[1].data.copy_(tau * local_param[1].data +
                                       (1.0 - tau) * target_param[1].data)

    def save_models(self):
        # Checkpoint the actor and all four critics.
        print('.... saving models ....')
        self.actor_nn.save_checkpoint()
        self.critic_local_1_nn.save_checkpoint()
        self.critic_local_2_nn.save_checkpoint()
        self.critic_target_1_nn.save_checkpoint()
        self.critic_target_2_nn.save_checkpoint()

    def load_models(self):
        # Restore the actor and all four critics.
        print('.... loading models ....')
        self.actor_nn.load_checkpoint()
        self.critic_local_1_nn.load_checkpoint()
        self.critic_local_2_nn.load_checkpoint()
        self.critic_target_1_nn.load_checkpoint()
        self.critic_target_2_nn.load_checkpoint()
class Agent():
    """DDPG-style agent: deterministic actor, Q-critic, target twins of each,
    Ornstein-Uhlenbeck exploration noise, and Polyak target updates."""

    def __init__(self, alpha=0.0001, beta=.001, input_dims=[8], gamma=.99,
                 n_actions=2, max_size=1000000, layer1_size=256,
                 layer2_size=256, tau=.005, batch_size=256, reward_scale=2):
        # reward scales depends on action convention for the environment
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.tau = tau
        # Online networks and their target twins.
        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions)
        self.critic = CriticNetwork(beta, input_dims, n_actions=n_actions)
        self.target_actor = ActorNetwork(alpha, input_dims,
                                         n_actions=n_actions)
        self.target_critic = CriticNetwork(beta, input_dims,
                                           n_actions=n_actions)
        self.actor_optimizer = T.optim.Adam(self.actor.parameters(),
                                            lr=self.alpha)
        self.critic_optimizer = T.optim.Adam(self.critic.parameters(),
                                             lr=self.beta)
        # Targets begin as exact copies of the online networks.
        self.hard_update(self.target_actor, self.actor)
        self.hard_update(self.target_critic, self.critic)
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.random = OrnsteinUhlenbeckProcess(size=n_actions, theta=.15,
                                               mu=0.0, sigma=.2)
        self.batch_size = batch_size
        self.s_t = None
        self.a_t = None

    def hard_update(self, target, source):
        """Copy every parameter of source onto target, in place."""
        for dst, src in zip(target.parameters(), source.parameters()):
            dst.data.copy_(src.data)

    def choose_action(self, observation):
        """Deterministic policy action plus OU exploration noise."""
        obs_tensor = T.tensor(observation,
                              dtype=T.float).to(self.actor.device)
        greedy_action = self.actor(obs_tensor)
        return greedy_action.detach().numpy() + self.random.sample()

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        # Intentionally a no-op here: target maintenance is done by the
        # explicit hard_update/soft_update calls instead.
        pass

    def save_models(self):
        """Checkpoint all four networks."""
        print("saving models:")
        self.actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()
        self.target_actor.save_checkpoint()

    def load_models(self):
        """Restore all four networks from their checkpoints."""
        print("loading models:")
        self.actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()
        self.target_actor.load_checkpoint()

    def learn(self):
        """One DDPG update from a replayed minibatch; no-op while the buffer
        holds fewer than batch_size transitions."""
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = self.memory.sample_buffer(
            self.batch_size)
        device = self.actor.device
        reward_batch = T.tensor(reward, dtype=T.float).to(device)
        done_batch = T.tensor(done).to(device)
        next_state_batch = T.tensor(new_state, dtype=T.float).to(device)
        state_batch = T.tensor(state, dtype=T.float).to(device)
        action_batch = T.tensor(action, dtype=T.float).to(device)

        # Bootstrapped target: r + gamma * Q'(s', pi'(s')), masked where done.
        next_q_values = self.target_critic(next_state_batch,
                                           self.target_actor(next_state_batch))
        target_q_batch = reward_batch.unsqueeze(1) + \
            self.gamma * (~done_batch).unsqueeze(1) * next_q_values

        # Critic step: regress Q(s, a) toward the target.
        self.critic_optimizer.zero_grad()
        q_batch = self.critic(state_batch, action_batch)
        value_loss = F.mse_loss(q_batch, target_q_batch)
        value_loss.backward()
        self.critic_optimizer.step()

        # Actor step: ascend Q(s, pi(s)) by descending its negation.
        self.actor_optimizer.zero_grad()
        policy_loss = -self.critic(state_batch, self.actor(state_batch))
        policy_loss = policy_loss.mean()
        policy_loss.backward()
        self.actor_optimizer.step()

        # Polyak-average the online weights into the targets.
        self.soft_update(self.target_actor, self.actor, self.tau)
        self.soft_update(self.target_critic, self.critic, self.tau)

    def soft_update(self, target, source, tau):
        """In-place Polyak update: target <- (1 - tau)*target + tau*source."""
        for dst, src in zip(target.parameters(), source.parameters()):
            dst.data.copy_(dst.data * (1.0 - tau) + src.data * tau)
class Agent():
    # TD3 agent: twin critics, delayed actor updates, and target-policy
    # smoothing with clipped noise.
    def __init__(self, alpha, beta, input_dims, tau, env, action_bound,
                 gamma=0.99, update_actor_interval=2, warmup=1000,
                 n_actions=2, max_size=1000000, layer1_size=400,
                 layer2_size=300, batch_size=100, noise=0.1):
        # alpha/beta: actor/critic learning rates; noise: exploration stddev.
        self.gamma = gamma
        self.tau = tau
        self.max_action = env.action_space.high
        self.min_action = env.action_space.low
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.learn_step_cntr = 0  # counts learn() calls (delayed actor update)
        self.time_step = 0        # counts env steps (warmup phase)
        self.warmup = warmup
        self.n_actions = n_actions
        self.update_actor_iter = update_actor_interval
        self.action_bound = action_bound
        self.actor = ActorNetwork(alpha, input_dims, layer1_size, layer2_size,
                                  n_actions=n_actions, name='actor')
        self.critic_1 = CriticNetwork(beta, input_dims, layer1_size,
                                      layer2_size, n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, layer1_size,
                                      layer2_size, n_actions=n_actions,
                                      name='critic_2')
        self.target_actor = ActorNetwork(alpha, input_dims, layer1_size,
                                         layer2_size, n_actions=n_actions,
                                         name='target_actor')
        self.target_critic_1 = CriticNetwork(beta, input_dims, layer1_size,
                                             layer2_size, n_actions=n_actions,
                                             name='target_critic_1')
        self.target_critic_2 = CriticNetwork(beta, input_dims, layer1_size,
                                             layer2_size, n_actions=n_actions,
                                             name='target_critic_2')
        self.noise = noise
        # tau=1 hard-copies the online weights into all three targets.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # During warmup act with pure Gaussian noise instead of the policy.
        if self.time_step < self.warmup:
            mu = T.tensor(
                np.random.normal(scale=self.noise, size=(self.n_actions, )))
        else:
            state = T.tensor(observation, dtype=T.float).to(self.actor.device)
            mu = self.actor.forward(state).to(self.actor.device)
        # NOTE(review): np.random.normal(scale=self.noise) here is a scalar,
        # so the same noise value is added to every action dimension —
        # confirm this (rather than per-dimension noise) is intended.
        mu_prime = mu + T.tensor(np.random.normal(scale=self.noise),
                                 dtype=T.float).to(self.actor.device)
        mu_prime = T.clamp(mu_prime, self.min_action[0], self.max_action[0])
        self.time_step += 1
        # Scale the clamped action into the environment's action range.
        return (mu_prime * T.tensor(self.action_bound)).cpu().detach().numpy()

    def remember(self, state, action, reward, new_state, done):
        # Store one transition in the replay buffer.
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        # Skip updates until a full minibatch is available.
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)
        reward = T.tensor(reward, dtype=T.float).to(self.critic_1.device)
        done = T.tensor(done).to(self.critic_1.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.critic_1.device)
        state = T.tensor(state, dtype=T.float).to(self.critic_1.device)
        action = T.tensor(action, dtype=T.float).to(self.critic_1.device)
        # Target-policy smoothing: add clipped noise to the target action.
        # NOTE(review): the smoothing noise is also a scalar shared across
        # the batch and action dimensions — verify intended.
        target_actions = self.target_actor.forward(state_)
        target_actions = target_actions + \
            T.clamp(T.tensor(np.random.normal(scale=0.2)), -0.5, 0.5)
        target_actions = T.clamp(target_actions, self.min_action[0],
                                 self.max_action[0])
        q1_ = self.target_critic_1.forward(state_, target_actions)
        q2_ = self.target_critic_2.forward(state_, target_actions)
        q1 = self.critic_1.forward(state, action)
        q2 = self.critic_2.forward(state, action)
        # Terminal states contribute no bootstrap value (done is a bool mask).
        q1_[done] = 0.0
        q2_[done] = 0.0
        q1_ = q1_.view(-1)
        q2_ = q2_.view(-1)
        # Clipped double-Q: bootstrap from the smaller target estimate.
        critic_value_ = T.min(q1_, q2_)
        target = reward + self.gamma * critic_value_
        target = target.view(self.batch_size, 1)
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q1_loss = F.mse_loss(target, q1)
        q2_loss = F.mse_loss(target, q2)
        critic_loss = q1_loss + q2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.learn_step_cntr += 1
        # Delayed policy update: actor and targets only move every
        # update_actor_iter critic steps.
        if self.learn_step_cntr % self.update_actor_iter != 0:
            return
        self.actor.optimizer.zero_grad()
        actor_q1_loss = self.critic_1.forward(state, self.actor.forward(state))
        actor_loss = -T.mean(actor_q1_loss)
        actor_loss.backward()
        self.actor.optimizer.step()
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        # Polyak update of all three target networks:
        # target <- tau*online + (1-tau)*target.  tau=1 is a hard copy.
        if tau is None:
            tau = self.tau
        actor_params = self.actor.named_parameters()
        critic_1_params = self.critic_1.named_parameters()
        critic_2_params = self.critic_2.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_1_params = self.target_critic_1.named_parameters()
        target_critic_2_params = self.target_critic_2.named_parameters()
        critic_1 = dict(critic_1_params)
        critic_2 = dict(critic_2_params)
        actor = dict(actor_params)
        target_actor = dict(target_actor_params)
        target_critic_1 = dict(target_critic_1_params)
        target_critic_2 = dict(target_critic_2_params)
        for name in critic_1:
            critic_1[name] = tau*critic_1[name].clone() + \
                (1-tau)*target_critic_1[name].clone()
        for name in critic_2:
            critic_2[name] = tau*critic_2[name].clone() + \
                (1-tau)*target_critic_2[name].clone()
        for name in actor:
            actor[name] = tau*actor[name].clone() + \
                (1-tau)*target_actor[name].clone()
        self.target_critic_1.load_state_dict(critic_1)
        self.target_critic_2.load_state_dict(critic_2)
        self.target_actor.load_state_dict(actor)

    def save_models(self):
        # Checkpoint all six networks.
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.target_critic_1.save_checkpoint()
        self.target_critic_2.save_checkpoint()

    def load_models(self):
        # Restore all six networks from their checkpoints.
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.target_critic_1.load_checkpoint()
        self.target_critic_2.load_checkpoint()
class Agent:
    """Soft Actor-Critic agent built from a stochastic actor, twin Q-critics,
    a state-value network and its Polyak-averaged target twin."""

    def __init__(self, alpha=3e-4, beta=3e-4, input_dims=[8], env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=5e-3,
                 fc1_dim=256, fc2_dim=256, batch_size=256, reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        # Policy, two Q estimators, state-value net and its target copy.
        self.actor = ActorNetwork(alpha, input_dims, n_actions,
                                  env.action_space.high)
        self.critic1 = CriticNetwork(beta, input_dims, n_actions,
                                     name='critic1')
        self.critic2 = CriticNetwork(beta, input_dims, n_actions,
                                     name='critic2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')
        self.scale = reward_scale
        # tau=1 hard-copies the value weights into the target on start-up.
        self.update_network_parameters(tau=1)

    def choose_action(self, obs):
        """Sample one action from the current stochastic policy."""
        state_batch = T.Tensor([obs]).to(self.actor.device)
        sampled, _ = self.actor.sample_normal(state_batch,
                                              reparameterize=False)
        return sampled.cpu().detach().numpy()[0]

    def remember(self, state, n_state, action, reward, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, n_state, action, reward, done)

    def update_network_parameters(self, tau=None):
        """Blend value-network weights into target_value at rate tau
        (defaults to the agent's tau; tau=1 performs a hard copy)."""
        if tau is None:
            tau = self.tau
        online = dict(self.value.named_parameters())
        target = dict(self.target_value.named_parameters())
        blended = {
            key: tau * online[key].clone() + (1 - tau) * target[key].clone()
            for key in online
        }
        self.target_value.load_state_dict(blended)

    def save_models(self):
        """Checkpoint all five networks."""
        print('... saving models ...')
        self.actor.save_ckpt()
        self.value.save_ckpt()
        self.target_value.save_ckpt()
        self.critic1.save_ckpt()
        self.critic2.save_ckpt()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('... loading models ...')
        self.actor.load_ckpt()
        self.value.load_ckpt()
        self.target_value.load_ckpt()
        self.critic1.load_ckpt()
        self.critic2.load_ckpt()

    def learn(self):
        """Run one SAC update (value net, actor, then both critics) on a
        sampled minibatch; no-op until batch_size transitions are stored."""
        if self.memory.mem_ptr < self.batch_size:
            return
        batch = self.memory.sample_buffer(self.batch_size)
        raw_s, raw_ns, raw_a, raw_r, raw_t = batch
        device = self.actor.device
        states = T.Tensor(raw_s).to(device)
        next_states = T.Tensor(raw_ns).to(device)
        actions = T.Tensor(raw_a).to(device)
        rewards = T.Tensor(raw_r).to(device)
        dones = T.tensor(raw_t).to(device)

        # --- value network update ---
        value = self.value(states).view(-1)
        next_value = self.target_value(next_states).view(-1)
        next_value[dones] = 0.0  # no bootstrapping past terminal states
        sampled_actions, logprobs = self.actor.sample_normal(
            states, reparameterize=False)
        logprobs = logprobs.view(-1)
        q_min = T.min(self.critic1(states, sampled_actions),
                      self.critic2(states, sampled_actions))
        q_min = q_min.view(-1)
        value_target = q_min - logprobs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        self.value.optimizer.zero_grad()
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        # --- actor update (reparameterized so gradients reach the policy) ---
        sampled_actions, logprobs = self.actor.sample_normal(
            states, reparameterize=True)
        logprobs = logprobs.view(-1)
        q_min = T.min(self.critic1(states, sampled_actions),
                      self.critic2(states, sampled_actions))
        q_min = q_min.view(-1)
        actor_loss = T.mean(logprobs - q_min)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        # --- twin critic update toward the scaled-reward TD target ---
        q_hat = self.scale * rewards + self.gamma * next_value
        q1_pred = self.critic1(states, actions).view(-1)
        q2_pred = self.critic2(states, actions).view(-1)
        critic_loss = 0.5 * F.mse_loss(q_hat, q1_pred) + \
            0.5 * F.mse_loss(q_hat, q2_pred)
        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        # Finally, soft-update the target value network.
        self.update_network_parameters()
class Agent():
    # Discrete-action SAC variant with a state-value network plus twin
    # local/target critics.  Hyperparameters come from the project's
    # Hyper/Constants modules.
    def __init__(self, input_dims, env, n_actions):
        self.memory = ReplayBuffer(input_dims)
        self.n_actions = n_actions
        self.actor_nn = ActorNetwork(input_dims,
                                     n_actions=n_actions,
                                     name=Constants.env_id + '_actor',
                                     max_action=env.action_space.n)
        self.critic_local_1_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_local_1')
        self.critic_local_2_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_local_2')
        self.critic_target_1_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_target_1')
        self.critic_target_2_nn = CriticNetwork(
            input_dims,
            n_actions=n_actions,
            name=Constants.env_id + '_critic_target_2')
        self.value_nn = ValueNetwork(input_dims,
                                     name=Constants.env_id + '_value')
        self.target_value_nn = ValueNetwork(
            input_dims, name=Constants.env_id + '_target_value')
        # tau=1 hard-copies value/critic weights into their targets.
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # Greedy action: index of the most probable action under the policy.
        state = T.Tensor([observation]).to(Constants.device)
        _, max_probability_action = self.actor_nn.sample_action(state)
        return max_probability_action

    def remember(self, state, action, reward, new_state, done):
        # Store one transition in the replay buffer.
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        # Skip until a full batch is buffered.
        if self.memory.mem_cntr < Hyper.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample_buffer()
        reward = T.tensor(reward, dtype=T.float).to(Constants.device)
        done = T.tensor(done).to(Constants.device)
        next_state = T.tensor(next_state, dtype=T.float).to(Constants.device)
        state = T.tensor(state, dtype=T.float).to(Constants.device)
        action = T.tensor(action, dtype=T.float).to(Constants.device)
        # value_from_nn = self.value_nn(state).view(-1)
        # NOTE(review): value_from_nn is computed but never used below — the
        # value loss is a policy-style objective instead of an MSE toward a
        # value target.  Verify this is intentional.
        value_from_nn = self.value_nn(state)
        new_value_from_nn = self.target_value_nn(next_state).view(-1)
        new_value_from_nn[done] = 0.0  # no bootstrap past terminal states
        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(next_state)
        with T.no_grad():
            q1_new_policy = self.critic_target_1_nn(next_state)
            q2_new_policy = self.critic_target_2_nn(next_state)
            critic_value = T.min(q1_new_policy, q2_new_policy)
        self.value_nn.optimizer.zero_grad()
        # CHANGE0003 Soft state-value where actions are discrete
        inside_term = Hyper.alpha * log_action_probabilities - critic_value
        #value_target = action_probabilities * (critic_value - Hyper.alpha * log_action_probabilities)
        value_loss = (action_probabilities * inside_term).sum(dim=1).mean()
        value_loss.backward(retain_graph=True)
        self.value_nn.optimizer.step()
        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(state)
        with T.no_grad():
            q1_new_policy = self.critic_local_1_nn(state)
            # NOTE(review): critic_local_1_nn is used twice here; the second
            # call presumably should be critic_local_2_nn — verify.
            q2_new_policy = self.critic_local_1_nn(state)
            critic_value = T.min(q1_new_policy, q2_new_policy)
        # CHANGE0005 Objective for policy
        actor_loss = action_probabilities * (
            Hyper.alpha * log_action_probabilities - critic_value)
        actor_loss = T.mean(actor_loss)
        self.actor_nn.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor_nn.optimizer.step()
        self.critic_local_1_nn.optimizer.zero_grad()
        self.critic_local_2_nn.optimizer.zero_grad()
        q_hat = Hyper.reward_scale * reward + Hyper.gamma * new_value_from_nn
        action_logits1 = self.critic_local_1_nn(state)
        # NOTE(review): T.argmax returns action *indices*, not Q-values;
        # regressing indices toward q_hat looks wrong — the usual form
        # gathers the logits for the taken action.  Confirm before relying
        # on these critic updates.
        q1_old_policy = T.argmax(action_logits1, dim=1, keepdim=True).view(-1)
        action_logits2 = self.critic_local_2_nn(state)
        q2_old_policy = T.argmax(action_logits2, dim=1, keepdim=True).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_local_1_nn.optimizer.step()
        self.critic_local_2_nn.optimizer.step()
        # Soft-update value and critic targets.
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        # Polyak update of target value net and both target critics:
        # target <- tau*local + (1-tau)*target.  tau=1 is a hard copy.
        if tau is None:
            tau = Hyper.tau
        target_value_params = self.target_value_nn.named_parameters()
        value_params = self.value_nn.named_parameters()
        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)
        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() + \
                (1-tau)*target_value_state_dict[name].clone()
        self.target_value_nn.load_state_dict(value_state_dict)
        self.update_network_parameters_line(
            self.critic_target_1_nn.named_parameters(),
            self.critic_local_1_nn.named_parameters(), tau)
        self.update_network_parameters_line(
            self.critic_target_2_nn.named_parameters(),
            self.critic_local_2_nn.named_parameters(), tau)

    def update_network_parameters_line(self, target_params, local_params, tau):
        # In-place Polyak update over paired (name, parameter) iterators.
        for target_param, local_param in zip(target_params, local_params):
            target_param[1].data.copy_(tau * local_param[1].data +
                                       (1.0 - tau) * target_param[1].data)

    def save_models(self):
        # Checkpoint the actor, both value nets and all four critics.
        print('.... saving models ....')
        self.actor_nn.save_checkpoint()
        self.value_nn.save_checkpoint()
        self.target_value_nn.save_checkpoint()
        self.critic_local_1_nn.save_checkpoint()
        self.critic_local_2_nn.save_checkpoint()
        self.critic_target_1_nn.save_checkpoint()
        self.critic_target_2_nn.save_checkpoint()

    def load_models(self):
        # Restore the actor, both value nets and all four critics.
        print('.... loading models ....')
        self.actor_nn.load_checkpoint()
        self.value_nn.load_checkpoint()
        self.target_value_nn.load_checkpoint()
        self.critic_local_1_nn.load_checkpoint()
        self.critic_local_2_nn.load_checkpoint()
        self.critic_target_1_nn.load_checkpoint()
        self.critic_target_2_nn.load_checkpoint()
class Agent():
    """Soft Actor-Critic agent for *discrete* action spaces.

    Uses twin online critics with Polyak-averaged target critics and a
    categorical policy; the soft state value is computed as an expectation
    over the action distribution rather than via a separate value network.

    NOTE(review): ``ActorNetwork``, ``CriticNetwork`` and ``ReplayBuffer`` are
    project classes defined elsewhere. The actor is assumed to output a
    probability distribution over ``n_actions`` (softmax head) and the critics
    a Q-value per action -- confirm against those class definitions.
    """

    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=[8], env=None,
                 gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 ent_alpha=0.0001, batch_size=256, reward_scale=2,
                 layer1_size=256, layer2_size=256, chkpt_dir='tmp/sac'):
        self.gamma = gamma              # discount factor
        self.tau = tau                  # Polyak averaging rate for targets
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.ent_alpha = ent_alpha      # entropy temperature
        # NOTE(review): reward_scale is stored but never applied in learn();
        # confirm whether rewards should be scaled as in the sibling agent.
        self.reward_scale = reward_scale

        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions,
                                  fc1_dims=layer1_size, fc2_dims=layer2_size,
                                  name='actor', chkpt_dir=chkpt_dir)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      fc1_dims=layer1_size,
                                      fc2_dims=layer2_size,
                                      name='critic_1', chkpt_dir=chkpt_dir)
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      fc1_dims=layer1_size,
                                      fc2_dims=layer2_size,
                                      name='critic_2', chkpt_dir=chkpt_dir)
        self.target_critic_1 = CriticNetwork(beta, input_dims,
                                             n_actions=n_actions,
                                             fc1_dims=layer1_size,
                                             fc2_dims=layer2_size,
                                             name='target_critic_1',
                                             chkpt_dir=chkpt_dir)
        self.target_critic_2 = CriticNetwork(beta, input_dims,
                                             n_actions=n_actions,
                                             fc1_dims=layer1_size,
                                             fc2_dims=layer2_size,
                                             name='target_critic_2',
                                             chkpt_dir=chkpt_dir)

        # tau=1 performs a hard copy so the targets start identical to the
        # online critics.
        self.update_network_parameters(tau=1)

    def choose_actions(self, observation, learn_mode=False):
        """Sample an action from the current categorical policy.

        observation: a raw environment observation (``learn_mode=False``) or
            an already-batched tensor on the actor's device
            (``learn_mode=True``).
        Returns ``(action, action_probs, log_probs, max_probability_action)``
        where ``action`` is a sampled integer index, ``action_probs`` /
        ``log_probs`` are the per-action (log-)probabilities, and
        ``max_probability_action`` is the greedy action index.
        """
        if not learn_mode:
            state = T.Tensor([observation]).to(self.actor.device)
        else:
            state = observation
        action_probs = self.actor.forward(state)
        max_probability_action = T.argmax(action_probs, dim=-1)
        action_distribution = Categorical(action_probs)
        action = action_distribution.sample().cpu().detach().numpy()[0]
        # Guard against log(0): add a tiny epsilon only where the
        # probability is exactly zero.
        z = action_probs == 0.0
        z = z.float() * 1e-8
        log_probs = T.log(action_probs + z)
        return action, action_probs, log_probs, max_probability_action

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        """Polyak-average online critic weights into the target critics.

        target <- tau * online + (1 - tau) * target.  With ``tau=1`` this is
        a hard copy; ``tau=None`` uses the instance's configured rate.
        """
        if tau is None:
            tau = self.tau

        target_critic_1_params = self.target_critic_1.named_parameters()
        target_critic_2_params = self.target_critic_2.named_parameters()
        critic_1_params = self.critic_1.named_parameters()
        critic_2_params = self.critic_2.named_parameters()

        target_critic_1_state_dict = dict(target_critic_1_params)
        critic_1_state_dict = dict(critic_1_params)
        target_critic_2_state_dict = dict(target_critic_2_params)
        critic_2_state_dict = dict(critic_2_params)

        for name in critic_1_state_dict:
            critic_1_state_dict[name] = \
                tau * critic_1_state_dict[name].clone() + \
                (1 - tau) * target_critic_1_state_dict[name].clone()
        for name in critic_2_state_dict:
            critic_2_state_dict[name] = \
                tau * critic_2_state_dict[name].clone() + \
                (1 - tau) * target_critic_2_state_dict[name].clone()

        self.target_critic_1.load_state_dict(critic_1_state_dict)
        self.target_critic_2.load_state_dict(critic_2_state_dict)

    def save_models(self):
        """Checkpoint all five networks."""
        # print('.....saving models.....')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.target_critic_1.save_checkpoint()
        self.target_critic_2.save_checkpoint()

    def load_models(self):
        """Restore all five networks from their checkpoints."""
        print('.....loading models.....')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.target_critic_1.load_checkpoint()
        self.target_critic_2.load_checkpoint()

    def learn(self):
        """One SAC update step: both critics, target networks, then actor.

        Returns the ``10, 10, 10, 10, 10`` sentinel (kept for caller
        compatibility) while the buffer holds fewer than ``batch_size``
        transitions; otherwise returns None.
        """
        if self.memory.mem_cntr < self.batch_size:
            return 10, 10, 10, 10, 10

        state, action, reward, next_state, done = \
            self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done, dtype=T.float).to(self.actor.device)
        state_ = T.tensor(next_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        # ---- Critic targets (no gradient through the target computation) ---
        with T.no_grad():
            _, probs_, log_probs_, _ = self.choose_actions(state_,
                                                           learn_mode=True)
            qf1_target_ = self.target_critic_1(state_)
            qf2_target_ = self.target_critic_2(state_)
            # Expected soft Q under the policy: E_a[min(Q1,Q2) - alpha*log pi]
            min_qf_target_ = probs_ * (T.min(qf1_target_, qf2_target_)
                                       - self.ent_alpha * log_probs_)
            min_qf_target_ = min_qf_target_.sum(dim=1).view(-1)
            next_q_value = reward + (1.0 - done) * self.gamma * min_qf_target_
            # BUGFIX: reshape to (batch, 1) to match the gathered Q-values;
            # F.mse_loss against a (batch,) target would broadcast to
            # (batch, batch) and silently corrupt both critic losses.
            next_q_value = next_q_value.view(self.batch_size, 1)

        action = action.view(self.batch_size, 1)
        qf1 = self.critic_1(state).gather(1, action.long())
        qf2 = self.critic_2(state).gather(1, action.long())

        self.critic_1.optimizer.zero_grad()
        qf1_loss = F.mse_loss(qf1, next_q_value)
        qf1_loss.backward()
        self.critic_1.optimizer.step()

        self.critic_2.optimizer.zero_grad()
        qf2_loss = F.mse_loss(qf2, next_q_value)
        qf2_loss.backward()
        self.critic_2.optimizer.step()

        self.update_network_parameters()

        # ---- Actor objective ----
        # BUGFIX: the original reused probs/log_probs computed for the *next*
        # state under T.no_grad(), so actor_loss.backward() produced no
        # gradient w.r.t. the actor parameters and the policy never improved.
        # Recompute the distribution on the current state with grad enabled.
        _, probs, log_probs, _ = self.choose_actions(state, learn_mode=True)
        qf1_pi = self.critic_1(state)
        qf2_pi = self.critic_2(state)
        min_qf_pi = T.min(qf1_pi, qf2_pi)
        inside_term = self.ent_alpha * log_probs - min_qf_pi
        actor_loss = (probs * inside_term).sum(dim=1).mean()

        self.actor.optimizer.zero_grad()
        actor_loss.backward()
        self.actor.optimizer.step()

    def compute_grads(self):
        """Gradient of the actor objective w.r.t. the *input state*, averaged
        over the batch (e.g. for saliency / input-sensitivity analysis).

        Returns False while the buffer holds fewer than ``batch_size``
        transitions; otherwise a tensor of shape ``input_dims``.
        """
        if self.memory.mem_cntr < self.batch_size:
            return False

        state, action, reward, next_state, done = \
            self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done, dtype=T.float).to(self.actor.device)
        state_ = T.tensor(next_state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        # Track gradients into the observation itself.
        state.requires_grad = True

        with T.no_grad():
            action_, probs, log_probs, max_action = \
                self.choose_actions(state_, learn_mode=True)
            # NOTE(review): qf*_target_/next_q_value are computed but unused
            # below; kept so any side effects of the forward passes (e.g.
            # batch-norm statistics) remain identical -- confirm and prune.
            qf1_target_ = self.target_critic_1(state_)
            qf2_target_ = self.target_critic_2(state_)
            min_qf_target_ = probs * (T.min(qf1_target_, qf2_target_)
                                      - self.ent_alpha * log_probs)
            min_qf_target_ = min_qf_target_.sum(dim=1).view(-1)
            next_q_value = reward + (1.0 - done) * self.gamma * min_qf_target_

        self.actor.optimizer.zero_grad()
        qf1_pi = self.critic_1(state)
        qf2_pi = self.critic_2(state)
        min_qf_pi = T.min(qf1_pi, qf2_pi)
        # NOTE(review): probs/log_probs here are detached and belong to the
        # next state; only min_qf_pi carries gradient into `state`. This
        # looks intentional for the input-gradient computation -- confirm.
        inside_term = self.ent_alpha * log_probs - min_qf_pi
        actor_loss = (probs * inside_term).sum(dim=1).mean()
        actor_loss.backward()

        data_grad = state.grad.data
        return data_grad.mean(axis=0)