Example #1
class Agent():
    def __init__(self,
                 alpha=0.0003,
                 beta=0.0003,
                 input_dims=[8],
                 env=None,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 tau=0.005,
                 layer1_size=256,
                 layer2_size=256,
                 batch_size=256,
                 reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions

        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  n_actions=n_actions,
                                  name='actor',
                                  max_action=env.action_space.high)
        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions=n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')

        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)

        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() + \
                    (1-tau)*target_value_state_dict[name].clone()

        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic_1.forward(state, action).view(-1)
        q2_old_policy = self.critic_2.forward(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
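A minimal training-loop sketch for the SAC agent above, assuming a Gym continuous-control environment (LunarLanderContinuous-v2 is only an illustrative choice) and the ReplayBuffer/network classes the constructor expects; the old Gym reset/step API is used, as in the rest of these examples.

import gym

env = gym.make('LunarLanderContinuous-v2')
agent = Agent(input_dims=env.observation_space.shape,
              env=env,
              n_actions=env.action_space.shape[0])

for i in range(250):
    observation = env.reset()
    done = False
    score = 0
    while not done:
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(action)
        agent.remember(observation, action, reward, observation_, done)
        agent.learn()
        score += reward
        observation = observation_
    print(f'episode {i} score {score:.1f}')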
Example #2
class Agent():
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 n_actions,
                 gamma=0.99,
                 max_size=50000,
                 fc1_dims=400,
                 fc2_dims=300,
                 batch_size=32):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.alpha = alpha
        self.beta = beta
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.noise = OUActionNoise(mu=np.zeros(n_actions))
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  fc1_dims,
                                  fc2_dims,
                                  n_actions=n_actions,
                                  name='actor')
        self.critic = CriticNetwork(beta,
                                    input_dims,
                                    fc1_dims,
                                    fc2_dims,
                                    n_actions=n_actions,
                                    name='critic')

        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         fc1_dims,
                                         fc2_dims,
                                         n_actions=n_actions,
                                         name='target_actor')
        self.target_critic = CriticNetwork(beta,
                                           input_dims,
                                           fc1_dims,
                                           fc2_dims,
                                           n_actions=n_actions,
                                           name='target_critic')

        # initially the target networks are exact copies of the online networks
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # set the actor to eval mode: it contains batch normalization layers
        # and we don't want to update their running statistics at this step
        self.actor.eval()
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        mu = self.actor.forward(state).to(self.actor.device)
        mu_prime = mu + T.tensor(self.noise(), dtype=T.float).to(
            self.actor.device)
        self.actor.train()
        return mu_prime.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, state_, done):
        self.memory.store_transition(state, action, reward, state_, done)

    def save_models(self):
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        states, actions, rewards, states_, done = self.memory.sample_buffer(
            self.batch_size)

        states = T.tensor(states, dtype=T.float).to(self.actor.device)
        states_ = T.tensor(states_, dtype=T.float).to(self.actor.device)
        actions = T.tensor(actions, dtype=T.float).to(self.actor.device)
        rewards = T.tensor(rewards, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)

        target_actions = self.target_actor.forward(states_)
        critic_value_ = self.target_critic.forward(states_, target_actions)
        critic_value = self.critic.forward(states, actions)

        critic_value_[done] = 0.0
        critic_value_ = critic_value_.view(-1)

        target = rewards + self.gamma * critic_value_
        target = target.view(self.batch_size, 1)

        self.critic.optimizer.zero_grad()
        critic_loss = F.mse_loss(target, critic_value)
        critic_loss.backward()
        self.critic.optimizer.step()

        self.actor.optimizer.zero_grad()
        actor_loss = -self.critic.forward(states, self.actor.forward(states))
        actor_loss = T.mean(actor_loss)
        actor_loss.backward()
        self.actor.optimizer.step()

        # tau is left as None so that update_network_parameters() falls back to self.tau
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        actor_params = self.actor.named_parameters()
        critic_params = self.critic.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()

        critic_state_dict = dict(critic_params)
        actor_state_dict = dict(actor_params)
        target_critic_state_dict = dict(target_critic_params)
        target_actor_state_dict = dict(target_actor_params)

        for name in critic_state_dict:
            critic_state_dict[name] = tau * critic_state_dict[name].clone() + (
                1 - tau) * target_critic_state_dict[name].clone()

        for name in actor_state_dict:
            actor_state_dict[name] = tau * actor_state_dict[name].clone() + (
                1 - tau) * target_actor_state_dict[name].clone()

        self.target_critic.load_state_dict(critic_state_dict)
        self.target_actor.load_state_dict(actor_state_dict)
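The dictionary-based soft update above can also be written directly over the parameter tensors; an equivalent helper, assuming the online and target networks share the same architecture:

def soft_update(online_net, target_net, tau):
    # Polyak averaging: target <- tau * online + (1 - tau) * target, in place
    for online_param, target_param in zip(online_net.parameters(),
                                          target_net.parameters()):
        target_param.data.copy_(tau * online_param.data +
                                (1.0 - tau) * target_param.data)

# usage inside update_network_parameters, e.g.:
#   soft_update(self.critic, self.target_critic, tau)
#   soft_update(self.actor, self.target_actor, tau)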
Example #3
class Agent_2():
    def __init__(self,
                 alpha=0.00005,
                 beta=0.00005,
                 input_dims=5,
                 env=None,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 tau=0.005,
                 layer1_size=256,
                 layer2_size=256,
                 batch_size=256,
                 reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        latent_dims = 10

        self.actor = ActorNetwork_2(alpha,
                                    latent_dims,
                                    env.action_space.high,
                                    n_actions=n_actions)
        self.critic_1 = CriticNetwork(beta,
                                      latent_dims,
                                      n_actions,
                                      name='critic_det_1')
        self.critic_2 = CriticNetwork(beta,
                                      latent_dims,
                                      n_actions,
                                      name='critic_det_2')
        self.value = ValueNetwork(beta, latent_dims, name='value_det')
        self.target_value = ValueNetwork(beta,
                                         latent_dims,
                                         name='target_value_det')
        self.VAE = LinearVAE()

        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = torch.Tensor([observation]).to(self.actor.device)
        state_latent = self.VAE.sample_normal(state)
        actions = self.actor(state_latent)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)

        for name in target_value_dict:
            target_value_dict[name] = tau*value_dict[name].clone() + \
                (1-tau)*target_value_dict[name].clone()

        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        states, new_states, actions, rewards, dones = self.memory.sample_buffer(
            self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)

        # Train VAE with KL divergence + reconstruction_loss + log_probs
        reconstruction, mu, logvar, log_probs = self.VAE(states)
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        reconstruction_loss = F.mse_loss(reconstruction, states)
        final_loss = KLD + reconstruction_loss
        self.VAE.optimizer.zero_grad()
        final_loss.backward(retain_graph=True)
        self.VAE.optimizer.step()

        latent_states = self.VAE.sample_normal(states)
        states_value = self.value(latent_states).view(-1)
        new_latent_states = self.VAE.sample_normal(new_states)
        new_states_value = self.target_value(new_latent_states).view(-1)
        new_states_value[dones] = 0.0

        action = self.actor(latent_states)
        q1_new_policy = self.critic_1(latent_states, action)
        q2_new_policy = self.critic_2(latent_states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actor_loss = -critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(latent_states, actions).view(-1)
        q2_old_policy = self.critic_2(latent_states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.update_network_parameters()
        return final_loss, value_loss, actor_loss, critic_loss
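Agent_2 relies on a LinearVAE class that is not shown here. The sketch below is only a hypothetical reconstruction of the interface it appears to expect (forward() returning reconstruction, mu, logvar and the latent log-probability; sample_normal() returning a reparameterized latent sample); the layer sizes and learning rate are assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class LinearVAE(nn.Module):
    # hypothetical sketch; the real LinearVAE used by Agent_2 is not shown above
    def __init__(self, input_dims=5, latent_dims=10, lr=0.0005):
        super().__init__()
        self.encoder = nn.Linear(input_dims, 64)
        self.mu_layer = nn.Linear(64, latent_dims)
        self.logvar_layer = nn.Linear(64, latent_dims)
        self.decoder = nn.Sequential(nn.Linear(latent_dims, 64), nn.ReLU(),
                                     nn.Linear(64, input_dims))
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.to(self.device)

    def encode(self, state):
        h = F.relu(self.encoder(state))
        mu, logvar = self.mu_layer(h), self.logvar_layer(h)
        std = torch.exp(0.5 * logvar)
        z = mu + std * torch.randn_like(std)  # reparameterization trick
        return z, mu, logvar, std

    def sample_normal(self, state):
        z, _, _, _ = self.encode(state)
        return z

    def forward(self, state):
        z, mu, logvar, std = self.encode(state)
        log_probs = torch.distributions.Normal(mu, std).log_prob(z).sum(dim=-1)
        reconstruction = self.decoder(z)
        return reconstruction, mu, logvar, log_probs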
Example #4
class Agent_sm():
    def __init__(self,
                 alpha=0.0003,
                 beta=0.0003,
                 input_dims=8,
                 env=None,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 tau=0.005,
                 layer1_size=256,
                 layer2_size=256,
                 batch_size=256,
                 reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions

        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  env.action_space.high,
                                  n_actions=n_actions)
        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')

        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = torch.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)

        for name in target_value_dict:
            target_value_dict[name] = tau*value_dict[name].clone() + \
                (1-tau)*target_value_dict[name].clone()

        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        states, new_states, actions, rewards, dones = self.memory.sample_buffer(
            self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)

        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        new_states_value[dones] = 0.0

        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(states, actions).view(-1)
        q2_old_policy = self.critic_2(states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
        #         value_loss = value_loss.cpu().detach().numpy()[0]
        #         actor_loss = actor_loss.cpu().detach().numpy()[0]
        #         critic_loss = critic_loss.cpu().detach().numpy()[0]

        return 0, value_loss, actor_loss, critic_loss

    def learn_sm(self, sm_reg=1):
        if self.memory.mem_cntr < self.batch_size:
            return

        states, new_states, actions, rewards, dones = self.memory.sample_buffer(
            self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)

        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        new_states_value[dones] = 0.0

        #         action, log_probs = self.actor.sample_normal(states, reparameterize=False)
        #         log_probs = log_probs.view(-1)
        #         q1_new_policy = self.critic_1(states, action)
        #         q2_new_policy = self.critic_2(states, action)
        #         critic_value = torch.min(q1_new_policy, q2_new_policy)
        #         critic_value = critic_value.view(-1)

        #         self.value.optimizer.zero_grad()
        #         value_target = critic_value - log_probs
        #         value_loss = 0.5 * F.mse_loss(states_value, value_target)
        #         value_loss.backward(retain_graph=True)
        #         self.value.optimizer.step()

        #         action, log_probs = self.actor.sample_normal(states, reparameterize=True)
        action, _ = self.actor.sample_normal(states, reparameterize=True)
        #         log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        # sample actions for next batch states
        action_next, _ = self.actor.sample_normal(new_states,
                                                  reparameterize=True)
        q1_new_policy = self.critic_1(new_states, action_next)
        q2_new_policy = self.critic_2(new_states, action_next)
        critic_value_next = torch.min(q1_new_policy, q2_new_policy)
        critic_value_next = critic_value_next.view(-1)

        #         actor_loss = log_probs - critic_value
        actor_loss = -(critic_value + critic_value_next) + sm_reg * F.mse_loss(
            action, action_next)
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        #         self.critic_1.optimizer.zero_grad()
        #         self.critic_2.optimizer.zero_grad()

        #         q_hat = self.scale*rewards + self.gamma*new_states_value
        #         q1_old_policy = self.critic_1(states, actions).view(-1)
        #         q2_old_policy = self.critic_2(states, actions).view(-1)
        #         critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        #         critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        #         critic_loss = critic1_loss + critic2_loss
        #         critic_loss.backward()
        #         self.critic_1.optimizer.step()
        #         self.critic_2.optimizer.step()

        #         self.update_network_parameters()

        return 0, 0, actor_loss, 0
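A brief sketch of one way the two update routines might be combined per environment step; this interleaving is an assumption, since the original training loop is not shown.

def train_step(agent, observation, env, sm_reg=1):
    # one environment interaction, then the full SAC update plus the
    # smoothness-regularized actor update
    action = agent.choose_action(observation)
    observation_, reward, done, _ = env.step(action)
    agent.remember(observation, action, reward, observation_, done)
    agent.learn()           # value, actor and critic updates
    agent.learn_sm(sm_reg)  # extra actor update with the smoothness penalty
    return observation_, reward, done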
Example #5
class Agent:
    def __init__(self,
                 actor_dims,
                 critic_dims,
                 n_actions,
                 n_agents,
                 agent_idx,
                 chkpt_dir,
                 alpha=0.01,
                 beta=0.01,
                 fc1=64,
                 fc2=64,
                 gamma=0.95,
                 tau=0.01):
        """

        Args:
            actor_dims:
            critic_dims:
            n_actions: number of actions
            n_agents:
            agent_idx: agent index
            chkpt_dir: checkpoint directory
            alpha: learning rate
            beta: learning rate
            fc1:
            fc2:
            gamma: discount factor
            tau: soft update parameter
        """
        self.gamma = gamma
        self.tau = tau
        self.n_actions = n_actions
        self.agent_name = 'agent_%s' % agent_idx
        # e.g., name = agent_1_actor
        self.actor = ActorNetwork(alpha,
                                  actor_dims,
                                  fc1,
                                  fc2,
                                  n_actions,
                                  chkpt_dir=chkpt_dir,
                                  name=self.agent_name + '_actor')
        self.critic = CriticNetwork(beta,
                                    critic_dims,
                                    fc1,
                                    fc2,
                                    n_agents,
                                    n_actions,
                                    chkpt_dir=chkpt_dir,
                                    name=self.agent_name + '_critic')
        self.target_actor = ActorNetwork(alpha,
                                         actor_dims,
                                         fc1,
                                         fc2,
                                         n_actions,
                                         chkpt_dir=chkpt_dir,
                                         name=self.agent_name +
                                         '_target_actor')
        self.target_critic = CriticNetwork(beta,
                                           critic_dims,
                                           fc1,
                                           fc2,
                                           n_agents,
                                           n_actions,
                                           chkpt_dir=chkpt_dir,
                                           name=self.agent_name +
                                           '_target_critic')

        # initially target networks and networks have the same parameters
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        """

        Args:
            observation:

        Returns: action w.r.t. the current policy and exploration

        """
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        # action of current policy
        actions = self.actor.forward(state)
        # exploration (0.1 is the parameter of the exploration)
        noise = 0.1 * T.rand(self.n_actions).to(self.actor.device)
        # print(f"action={action}, noise={noise}")
        action = actions + noise
        # action = actions

        return action.detach().cpu().numpy()[0]

    def update_network_parameters(self, tau=None):
        # use default tau if nothing is input
        if tau is None:
            tau = self.tau

        target_actor_params = self.target_actor.named_parameters()
        actor_params = self.actor.named_parameters()

        # soft update of target networks
        target_actor_state_dict = dict(target_actor_params)
        actor_state_dict = dict(actor_params)
        for name in actor_state_dict:
            actor_state_dict[name] = tau * actor_state_dict[name].clone() + \
                                     (1 - tau) * target_actor_state_dict[
                                         name].clone()

        self.target_actor.load_state_dict(actor_state_dict)

        target_critic_params = self.target_critic.named_parameters()
        critic_params = self.critic.named_parameters()

        target_critic_state_dict = dict(target_critic_params)
        critic_state_dict = dict(critic_params)
        for name in critic_state_dict:
            critic_state_dict[name] = tau * critic_state_dict[name].clone() + \
                                      (1 - tau) * target_critic_state_dict[
                                          name].clone()

        self.target_critic.load_state_dict(critic_state_dict)

    def save_models(self):
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()
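A short sketch of how a set of these agents might be built for MADDPG; the per-agent observation sizes and the convention that the centralized critic sees all observations concatenated are assumptions.

n_agents = 3
actor_dims = [8, 10, 10]        # per-agent observation sizes (illustrative)
critic_dims = sum(actor_dims)   # centralized critic sees all observations
n_actions = 5

agents = [Agent(actor_dims[idx],
                critic_dims,
                n_actions,
                n_agents,
                agent_idx=idx,
                chkpt_dir='tmp/maddpg/') for idx in range(n_agents)]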
Example #6
class Agent():
    def __init__(self, alpha=0.0003, beta=0.0003, input_dims=[8],
                 env=None, gamma=0.99, n_actions=2, max_size=1000000, tau=0.005,
                 layer1_size=256, layer2_size=256, batch_size=256, reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions

        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions,
                                  name='actor', max_action=env.action_space.high)
        self.critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')

        self.scale = reward_scale
        # initializes the target value network as an exact copy of the value network
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)

        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:  # tau=1 makes an exact copy; the default tau gives a soft update
            tau = self.tau

        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau * value_state_dict[name].clone() + \
                                     (1 - tau) * target_value_state_dict[name].clone()

        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        # sample a batch of transitions
        state, action, reward, new_state, done = \
            self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)  # transforms into tensors
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0  # terminal next states get zero value (done is a boolean mask)

        actions, log_probs = self.actor.sample_normal(state, reparameterize=False)  # sample actions; the lower of the two critics' Q-values is used below
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs  # soft value target: V(s) = E[Q(s,a) - log pi(a|s)], so the entropy term is subtracted from critic_value
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state, reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1.forward(state, actions)
        q2_new_policy = self.critic_2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q_hat = self.scale * reward + self.gamma * value_  # the reward scale accounts for the entropy term and encourages exploration
        q1_old_policy = self.critic_1.forward(state, action).view(-1)
        q2_old_policy = self.critic_2.forward(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
Example #7
class Agent(object):
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 action_bound,
                 tau,
                 env,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=400,
                 layer2_size=300,
                 batch_size=64):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.action_bound = action_bound
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer1_size,
                                  layer2_size,
                                  n_actions=n_actions,
                                  name='Actor')
        self.critic = CriticNetwork(beta,
                                    input_dims,
                                    layer1_size,
                                    layer2_size,
                                    n_actions=n_actions,
                                    name='Critic')

        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         layer1_size,
                                         layer2_size,
                                         n_actions=n_actions,
                                         name='TargetActor')
        self.target_critic = CriticNetwork(beta,
                                           input_dims,
                                           layer1_size,
                                           layer2_size,
                                           n_actions=n_actions,
                                           name='TargetCritic')

        self.noise = OUActionNoise(mu=np.zeros(n_actions))

        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        self.actor.eval()
        observation = T.tensor(observation,
                               dtype=T.float).to(self.actor.device)
        mu = self.actor.forward(observation).to(self.actor.device)
        mu_prime = mu + T.tensor(self.noise(), dtype=T.float).to(
            self.actor.device)
        self.actor.train()
        return (mu_prime * T.tensor(self.action_bound, dtype=T.float).to(
            self.actor.device)).cpu().detach().numpy()

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
                                      self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.critic.device)
        done = T.tensor(done).to(self.critic.device)
        new_state = T.tensor(new_state, dtype=T.float).to(self.critic.device)
        action = T.tensor(action, dtype=T.float).to(self.critic.device)
        state = T.tensor(state, dtype=T.float).to(self.critic.device)

        self.target_actor.eval()
        self.target_critic.eval()
        self.critic.eval()
        target_actions = self.target_actor.forward(new_state)
        critic_value_ = self.target_critic.forward(new_state, target_actions)
        critic_value = self.critic.forward(state, action)

        target = []
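        # note: multiplying by done[j] only makes sense if the replay buffer
        # stores 1 - done, so the bootstrap term is dropped for terminal
        # transitions; the ReplayBuffer implementation is not shown here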
        for j in range(self.batch_size):
            target.append(reward[j] + self.gamma * critic_value_[j] * done[j])
        target = T.tensor(target).to(self.critic.device)
        target = target.view(self.batch_size, 1)

        self.critic.train()
        self.critic.optimizer.zero_grad()
        critic_loss = F.mse_loss(target, critic_value)
        critic_loss.backward()
        self.critic.optimizer.step()

        self.critic.eval()
        self.actor.optimizer.zero_grad()
        mu = self.actor.forward(state)
        self.actor.train()
        actor_loss = -self.critic.forward(state, mu)
        actor_loss = T.mean(actor_loss)
        actor_loss.backward()
        self.actor.optimizer.step()

        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        actor_params = self.actor.named_parameters()
        critic_params = self.critic.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()

        critic_state_dict = dict(critic_params)
        actor_state_dict = dict(actor_params)
        target_critic_dict = dict(target_critic_params)
        target_actor_dict = dict(target_actor_params)

        for name in critic_state_dict:
            critic_state_dict[name] = tau*critic_state_dict[name].clone() + \
                                      (1-tau)*target_critic_dict[name].clone()

        self.target_critic.load_state_dict(critic_state_dict)

        for name in actor_state_dict:
            actor_state_dict[name] = tau*actor_state_dict[name].clone() + \
                                      (1-tau)*target_actor_dict[name].clone()
        self.target_actor.load_state_dict(actor_state_dict)
        """
        #Verify that the copy assignment worked correctly
        target_actor_params = self.target_actor.named_parameters()
        target_critic_params = self.target_critic.named_parameters()

        critic_state_dict = dict(target_critic_params)
        actor_state_dict = dict(target_actor_params)
        print('\nActor Networks', tau)
        for name, param in self.actor.named_parameters():
            print(name, T.equal(param, actor_state_dict[name]))
        print('\nCritic Networks', tau)
        for name, param in self.critic.named_parameters():
            print(name, T.equal(param, critic_state_dict[name]))
        input()
        """

    def save_models(self):
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()

    def check_actor_params(self):
        current_actor_params = self.actor.named_parameters()
        current_actor_dict = dict(current_actor_params)
        original_actor_dict = dict(self.original_actor.named_parameters())
        original_critic_dict = dict(self.original_critic.named_parameters())
        current_critic_params = self.critic.named_parameters()
        current_critic_dict = dict(current_critic_params)
        print('Checking Actor parameters')

        for param in current_actor_dict:
            print(
                param,
                T.equal(original_actor_dict[param], current_actor_dict[param]))
        print('Checking critic parameters')
        for param in current_critic_dict:
            print(
                param,
                T.equal(original_critic_dict[param],
                        current_critic_dict[param]))
        input()
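A minimal instantiation sketch for the DDPG agent above; the environment and hyperparameter values are illustrative choices, not values taken from the original code.

import gym

env = gym.make('Pendulum-v1')
agent = Agent(alpha=0.000025,
              beta=0.00025,
              input_dims=env.observation_space.shape,
              action_bound=env.action_space.high,
              tau=0.001,
              env=env,
              n_actions=env.action_space.shape[0])

observation = env.reset()
done = False
while not done:
    action = agent.choose_action(observation)
    observation_, reward, done, info = env.step(action)
    agent.remember(observation, action, reward, observation_, done)
    agent.learn()
    observation = observation_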
Example #8
class Agent:
    def __init__(self,
                 actor_dims,
                 critic_dims,
                 n_actions,
                 n_agents,
                 agent_idx,
                 chkpt_dir,
                 alpha=0.01,
                 beta=0.01,
                 fc1=64,
                 fc2=64,
                 gamma=0.95,
                 tau=0.01):
        self.gamma = gamma
        self.tau = tau
        self.n_actions = n_actions
        self.agent_name = 'agent_%s' % agent_idx
        self.actor = ActorNetwork(alpha,
                                  actor_dims,
                                  fc1,
                                  fc2,
                                  n_actions,
                                  chkpt_dir=chkpt_dir,
                                  name=self.agent_name + '_actor')
        self.critic = CriticNetwork(beta,
                                    critic_dims,
                                    fc1,
                                    fc2,
                                    n_agents,
                                    n_actions,
                                    chkpt_dir=chkpt_dir,
                                    name=self.agent_name + '_critic')
        self.target_actor = ActorNetwork(alpha,
                                         actor_dims,
                                         fc1,
                                         fc2,
                                         n_actions,
                                         chkpt_dir=chkpt_dir,
                                         name=self.agent_name +
                                         '_target_actor')
        self.target_critic = CriticNetwork(beta,
                                           critic_dims,
                                           fc1,
                                           fc2,
                                           n_agents,
                                           n_actions,
                                           chkpt_dir=chkpt_dir,
                                           name=self.agent_name +
                                           '_target_critic')

        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        actions = self.actor.forward(state)
        noise = T.rand(self.n_actions).to(self.actor.device)
        action = actions + noise

        return action.detach().cpu().numpy()[0]

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        target_actor_params = self.target_actor.named_parameters()
        actor_params = self.actor.named_parameters()

        target_actor_state_dict = dict(target_actor_params)
        actor_state_dict = dict(actor_params)
        for name in actor_state_dict:
            actor_state_dict[name] = tau*actor_state_dict[name].clone() + \
                    (1-tau)*target_actor_state_dict[name].clone()

        self.target_actor.load_state_dict(actor_state_dict)

        target_critic_params = self.target_critic.named_parameters()
        critic_params = self.critic.named_parameters()

        target_critic_state_dict = dict(target_critic_params)
        critic_state_dict = dict(critic_params)
        for name in critic_state_dict:
            critic_state_dict[name] = tau*critic_state_dict[name].clone() + \
                    (1-tau)*target_critic_state_dict[name].clone()

        self.target_critic.load_state_dict(critic_state_dict)

    def save_models(self):
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_critic.save_checkpoint()

    def load_models(self):
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_critic.load_checkpoint()
Example #9
class Agent():
    def __init__(self,
                 alpha=0.0003,
                 beta=0.0003,
                 input_dims=8,
                 env=None,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 tau=0.005,
                 layer1_size=256,
                 layer2_size=256,
                 batch_size=256,
                 reward_scale=2):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions

        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  env.action_space.high,
                                  n_actions=n_actions)
        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      n_actions,
                                      name='critic_2')
        self.value = ValueNetwork(beta, input_dims, name='value')
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')

        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = torch.Tensor([observation]).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_dict = dict(target_value_params)
        value_dict = dict(value_params)

        for name in target_value_dict:
            target_value_dict[name] = tau*value_dict[name].clone() + \
                (1-tau)*target_value_dict[name].clone()

        self.target_value.load_state_dict(target_value_dict)

    def save_models(self):
        print('... saving models ...')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()

    def load_models(self):
        print('... loading models ...')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        states, new_states, actions, rewards, dones = self.memory.sample_buffer(
            self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.actor.device)
        new_states = torch.tensor(new_states,
                                  dtype=torch.float).to(self.actor.device)
        actions = torch.tensor(actions,
                               dtype=torch.float).to(self.actor.device)
        rewards = torch.tensor(rewards,
                               dtype=torch.float).to(self.actor.device)
        dones = torch.tensor(dones).to(self.actor.device)

        states_value = self.value(states).view(-1)
        new_states_value = self.target_value(new_states).view(-1)
        new_states_value[dones] = 0.0

        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(states_value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        action, log_probs = self.actor.sample_normal(states,
                                                     reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(states, action)
        q2_new_policy = self.critic_2(states, action)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q_hat = self.scale * rewards + self.gamma * new_states_value
        q1_old_policy = self.critic_1(states, actions).view(-1)
        q2_old_policy = self.critic_2(states, actions).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)
        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()

        return value_loss, actor_loss, critic_loss

    def train_on_env(self, env):
        rewards = []
        done = False
        observation = env.reset()
        while not done:
            action = self.choose_action(observation)
            observation_, reward, done, _ = env.step(action)
            self.remember(observation, action, reward, observation_, done)
            #if not load_checkpoints:
            self.learn()
            observation = observation_
            rewards.append(reward)
        return np.sum(rewards)

    def generate_session(self, env, t_max=1000):
        states, traj_probs, actions, rewards = [], [], [], []
        s = env.reset()
        q_t = 0
        for t in range(t_max):
            state = torch.Tensor([s]).to(self.actor.device)
            action, log_probs = self.actor.sample_normal(state,
                                                         reparameterize=False)
            action = action.cpu().detach().numpy()[0]

            new_s, r, done, info = env.step(action)

            log_probs = log_probs.cpu().detach().numpy()[0]
            #q_t *= probs
            q_t += log_probs[0]
            states.append(s.tolist())
            traj_probs.append(q_t)
            actions.append(action[0])
            rewards.append(r)

            s = new_s
            if done:
                break

        return np.array(states), np.array(traj_probs), np.array(
            actions), np.array(rewards)
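A short sketch of driving the helpers above: training over episodes with train_on_env and then sampling one trajectory with generate_session; the environment name is an assumption.

import gym
import numpy as np

env = gym.make('LunarLanderContinuous-v2')
agent = Agent(input_dims=env.observation_space.shape[0],
              env=env,
              n_actions=env.action_space.shape[0])

returns = [agent.train_on_env(env) for _ in range(100)]
print('mean return over the last 10 episodes:', np.mean(returns[-10:]))

states, traj_probs, actions, rewards = agent.generate_session(env)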
Example #10
class Agent():
    def __init__(self, input_dims, env, n_actions):
        self.memory = ReplayBuffer(input_dims)
        self.n_actions = n_actions
        self.actor_nn = ActorNetwork(input_dims,
                                     n_actions=n_actions,
                                     name=Constants.env_id + '_actor',
                                     max_action=env.action_space.n)
        self.critic_local_1_nn = CriticNetwork(input_dims,
                                               n_actions=n_actions,
                                               name=Constants.env_id +
                                               '_critic_local_1')
        self.critic_local_2_nn = CriticNetwork(input_dims,
                                               n_actions=n_actions,
                                               name=Constants.env_id +
                                               '_critic_local_2')
        self.critic_target_1_nn = CriticNetwork(input_dims,
                                                n_actions=n_actions,
                                                name=Constants.env_id +
                                                '_critic_target_1')
        self.critic_target_2_nn = CriticNetwork(input_dims,
                                                n_actions=n_actions,
                                                name=Constants.env_id +
                                                '_critic_target_2')

    def choose_action(self, observation):
        state = T.Tensor([observation]).to(Constants.device)
        _, max_probability_action = self.actor_nn.sample_action(state)
        return max_probability_action

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        if self.memory.mem_cntr < Hyper.batch_size:
            return

        state, action, reward, next_state, done = self.memory.sample_buffer()

        reward = T.tensor(reward, dtype=T.float).to(Constants.device)
        done = T.tensor(done).to(Constants.device)
        next_state = T.tensor(next_state, dtype=T.float).to(Constants.device)
        state = T.tensor(state, dtype=T.float).to(Constants.device)
        action = T.tensor(action, dtype=T.float).to(Constants.device)

        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(next_state)
        with T.no_grad():
            action_logits1 = self.critic_target_1_nn(next_state)
            q1_new_policy = T.argmax(action_logits1, dim=1, keepdim=True)
            action_logits2 = self.critic_target_2_nn(next_state)
            q2_new_policy = T.argmax(action_logits2, dim=1, keepdim=True)
            q_value = T.min(q1_new_policy, q2_new_policy)
            min_qf_next_target = action_probabilities * (
                q_value - Hyper.alpha * log_action_probabilities)
            min_qf_next_target_sum = min_qf_next_target.sum(
                dim=1).unsqueeze(-1)
            not_done = (1.0 - done * 1).unsqueeze(-1)
            next_q_value = reward.unsqueeze(
                -1) + not_done * Hyper.gamma * (min_qf_next_target_sum)

        action_logits1 = self.critic_local_1_nn(state).gather(1, action.long())
        q_value1 = action_logits1.sum(dim=1).unsqueeze(-1)
        action_logits2 = self.critic_local_2_nn(state).gather(1, action.long())
        q_value2 = action_logits2.sum(dim=1).unsqueeze(-1)
        self.critic_local_1_nn.optimizer.zero_grad()
        self.critic_local_2_nn.optimizer.zero_grad()
        critic_1_loss = F.mse_loss(q_value1, next_q_value)
        critic_2_loss = F.mse_loss(q_value2, next_q_value)
        critic_1_loss.backward()
        critic_2_loss.backward()
        self.critic_local_1_nn.optimizer.step()
        self.critic_local_2_nn.optimizer.step()

        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(state)

        # -------------------------------------------------------------------------------------------
        # Calculates the loss for the actor. This loss includes the additional entropy term
        # CHANGE0003 Soft state-value where actions are discrete
        self.actor_nn.optimizer.zero_grad()
        action_logits1 = self.critic_target_1_nn(state)
        q1_new_policy = T.argmax(action_logits1, dim=1, keepdim=True)
        action_logits2 = self.critic_target_2_nn(state)
        q2_new_policy = T.argmax(action_logits2, dim=1, keepdim=True)
        q_value = T.min(q1_new_policy, q2_new_policy)
        inside_term = Hyper.alpha * log_action_probabilities - q_value
        policy_loss = (action_probabilities * inside_term).sum(dim=1).mean()
        policy_loss.backward(retain_graph=True)
        self.actor_nn.optimizer.step()

        self.update_q_weights()

    def update_q_weights(self):
        local_1_parameters = self.critic_local_1_nn.named_parameters()
        local_2_parameters = self.critic_local_2_nn.named_parameters()
        target_1_parameters = self.critic_target_1_nn.named_parameters()
        target_2_parameters = self.critic_target_2_nn.named_parameters()

        self.update_network_parameters_line(target_1_parameters,
                                            local_1_parameters, Hyper.tau)
        self.update_network_parameters_line(target_2_parameters,
                                            local_2_parameters, Hyper.tau)

    def update_network_parameters_line(self, target_params, local_params, tau):
        for target_param, local_param in zip(target_params, local_params):
            target_param[1].data.copy_(tau * local_param[1].data +
                                       (1.0 - tau) * target_param[1].data)

    def save_models(self):
        print('.... saving models ....')
        self.actor_nn.save_checkpoint()
        self.critic_local_1_nn.save_checkpoint()
        self.critic_local_2_nn.save_checkpoint()
        self.critic_target_1_nn.save_checkpoint()
        self.critic_target_2_nn.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor_nn.load_checkpoint()
        self.critic_local_1_nn.load_checkpoint()
        self.critic_local_2_nn.load_checkpoint()
        self.critic_target_1_nn.load_checkpoint()
        self.critic_target_2_nn.load_checkpoint()
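In the target computation above, T.argmax over the critic outputs returns action indices rather than Q-values. Standard discrete SAC instead keeps the full per-action Q vector from each critic, takes the elementwise minimum, and averages it under the policy together with the entropy term. A standalone sketch of that soft state-value (all names and the toy tensors are illustrative, not from the project):

import torch
import torch.nn.functional as F

def discrete_soft_value(q1, q2, probs, log_probs, alpha):
    # V(s) = E_{a~pi}[ min(Q1, Q2)(s, a) - alpha * log pi(a|s) ]
    q_min = torch.min(q1, q2)                     # elementwise min over both critics
    return (probs * (q_min - alpha * log_probs)).sum(dim=1, keepdim=True)

# Toy check: batch of 2 states, 3 discrete actions.
q1 = torch.randn(2, 3)
q2 = torch.randn(2, 3)
probs = F.softmax(torch.randn(2, 3), dim=1)
log_probs = torch.log(probs + 1e-8)
print(discrete_soft_value(q1, q2, probs, log_probs, alpha=0.2))  # shape (2, 1)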
Example #11
class Agent():
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 action_bound,
                 gamma=0.99,
                 update_actor_interval=2,
                 warmup=1000,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=400,
                 layer2_size=300,
                 batch_size=100,
                 noise=0.1):
        self.gamma = gamma
        self.tau = tau
        self.max_action = env.action_space.high
        self.min_action = env.action_space.low
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.learn_step_cntr = 0
        self.time_step = 0
        self.warmup = warmup
        self.n_actions = n_actions
        self.update_actor_iter = update_actor_interval
        self.action_bound = action_bound
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer1_size,
                                  layer2_size,
                                  n_actions=n_actions,
                                  name='actor')

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions=n_actions,
                                      name='critic_1')
        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions=n_actions,
                                      name='critic_2')

        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         layer1_size,
                                         layer2_size,
                                         n_actions=n_actions,
                                         name='target_actor')
        self.target_critic_1 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions=n_actions,
                                             name='target_critic_1')
        self.target_critic_2 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions=n_actions,
                                             name='target_critic_2')

        self.noise = noise
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        if self.time_step < self.warmup:
            mu = T.tensor(
                np.random.normal(scale=self.noise, size=(self.n_actions, )))
        else:
            state = T.tensor(observation, dtype=T.float).to(self.actor.device)
            mu = self.actor.forward(state).to(self.actor.device)
        mu_prime = mu + T.tensor(np.random.normal(scale=self.noise),
                                 dtype=T.float).to(self.actor.device)

        mu_prime = T.clamp(mu_prime, self.min_action[0], self.max_action[0])
        self.time_step += 1

        return (mu_prime * T.tensor(self.action_bound)).cpu().detach().numpy()

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.critic_1.device)
        done = T.tensor(done).to(self.critic_1.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.critic_1.device)
        state = T.tensor(state, dtype=T.float).to(self.critic_1.device)
        action = T.tensor(action, dtype=T.float).to(self.critic_1.device)

        target_actions = self.target_actor.forward(state_)
        target_actions = target_actions + \
                T.clamp(T.tensor(np.random.normal(scale=0.2)), -0.5, 0.5)
        target_actions = T.clamp(target_actions, self.min_action[0],
                                 self.max_action[0])

        q1_ = self.target_critic_1.forward(state_, target_actions)
        q2_ = self.target_critic_2.forward(state_, target_actions)

        q1 = self.critic_1.forward(state, action)
        q2 = self.critic_2.forward(state, action)

        q1_[done] = 0.0
        q2_[done] = 0.0

        q1_ = q1_.view(-1)
        q2_ = q2_.view(-1)

        critic_value_ = T.min(q1_, q2_)

        target = reward + self.gamma * critic_value_
        target = target.view(self.batch_size, 1)

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q1_loss = F.mse_loss(target, q1)
        q2_loss = F.mse_loss(target, q2)
        critic_loss = q1_loss + q2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.learn_step_cntr += 1

        if self.learn_step_cntr % self.update_actor_iter != 0:
            return

        self.actor.optimizer.zero_grad()
        actor_q1_loss = self.critic_1.forward(state, self.actor.forward(state))
        actor_loss = -T.mean(actor_q1_loss)
        actor_loss.backward()
        self.actor.optimizer.step()

        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        actor_params = self.actor.named_parameters()
        critic_1_params = self.critic_1.named_parameters()
        critic_2_params = self.critic_2.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_1_params = self.target_critic_1.named_parameters()
        target_critic_2_params = self.target_critic_2.named_parameters()

        critic_1 = dict(critic_1_params)
        critic_2 = dict(critic_2_params)
        actor = dict(actor_params)
        target_actor = dict(target_actor_params)
        target_critic_1 = dict(target_critic_1_params)
        target_critic_2 = dict(target_critic_2_params)

        for name in critic_1:
            critic_1[name] = tau*critic_1[name].clone() + \
                    (1-tau)*target_critic_1[name].clone()

        for name in critic_2:
            critic_2[name] = tau*critic_2[name].clone() + \
                    (1-tau)*target_critic_2[name].clone()

        for name in actor:
            actor[name] = tau*actor[name].clone() + \
                    (1-tau)*target_actor[name].clone()

        self.target_critic_1.load_state_dict(critic_1)
        self.target_critic_2.load_state_dict(critic_2)
        self.target_actor.load_state_dict(actor)

    def save_models(self):
        self.actor.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.target_critic_1.save_checkpoint()
        self.target_critic_2.save_checkpoint()

    def load_models(self):
        self.actor.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.target_critic_1.load_checkpoint()
        self.target_critic_2.load_checkpoint()
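The three per-parameter loops in update_network_parameters above all perform the same Polyak averaging. A compact helper that does the equivalent for any online/target pair is sketched below; this is a hypothetical refactoring, not part of the project:

import torch

def soft_update(target_net, online_net, tau):
    # target <- tau * online + (1 - tau) * target, parameter by parameter
    with torch.no_grad():
        for t_param, o_param in zip(target_net.parameters(), online_net.parameters()):
            t_param.data.mul_(1.0 - tau).add_(tau * o_param.data)

With such a helper, update_network_parameters would reduce to three calls such as soft_update(self.target_critic_1, self.critic_1, tau).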
Example #12
class Agent():
    def __init__(self, input_dims, env, n_actions):
        self.memory = ReplayBuffer(input_dims)
        self.n_actions = n_actions

        self.actor_nn = ActorNetwork(input_dims,
                                     n_actions=n_actions,
                                     name=Constants.env_id + '_actor',
                                     max_action=env.action_space.n)
        self.critic_local_1_nn = CriticNetwork(input_dims,
                                               n_actions=n_actions,
                                               name=Constants.env_id +
                                               '_critic_local_1')
        self.critic_local_2_nn = CriticNetwork(input_dims,
                                               n_actions=n_actions,
                                               name=Constants.env_id +
                                               '_critic_local_2')
        self.critic_target_1_nn = CriticNetwork(input_dims,
                                                n_actions=n_actions,
                                                name=Constants.env_id +
                                                '_critic_target_1')
        self.critic_target_2_nn = CriticNetwork(input_dims,
                                                n_actions=n_actions,
                                                name=Constants.env_id +
                                                '_critic_target_2')
        self.value_nn = ValueNetwork(input_dims,
                                     name=Constants.env_id + '_value')
        self.target_value_nn = ValueNetwork(input_dims,
                                            name=Constants.env_id +
                                            '_target_value')
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.Tensor([observation]).to(Constants.device)
        _, max_probability_action = self.actor_nn.sample_action(state)
        return max_probability_action

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def learn(self):
        if self.memory.mem_cntr < Hyper.batch_size:
            return

        state, action, reward, next_state, done = self.memory.sample_buffer()

        reward = T.tensor(reward, dtype=T.float).to(Constants.device)
        done = T.tensor(done).to(Constants.device)
        next_state = T.tensor(next_state, dtype=T.float).to(Constants.device)
        state = T.tensor(state, dtype=T.float).to(Constants.device)
        action = T.tensor(action, dtype=T.float).to(Constants.device)

        # value_from_nn = self.value_nn(state).view(-1)
        value_from_nn = self.value_nn(state)
        new_value_from_nn = self.target_value_nn(next_state).view(-1)
        new_value_from_nn[done] = 0.0

        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(next_state)
        with T.no_grad():
            q1_new_policy = self.critic_target_1_nn(next_state)
            q2_new_policy = self.critic_target_2_nn(next_state)
            critic_value = T.min(q1_new_policy, q2_new_policy)

        self.value_nn.optimizer.zero_grad()
        # CHANGE0003 Soft state-value where actions are discrete
        inside_term = Hyper.alpha * log_action_probabilities - critic_value
        #value_target = action_probabilities * (critic_value - Hyper.alpha * log_action_probabilities)
        value_loss = (action_probabilities * inside_term).sum(dim=1).mean()
        value_loss.backward(retain_graph=True)
        self.value_nn.optimizer.step()

        (action_probabilities,
         log_action_probabilities), _ = self.actor_nn.sample_action(state)
        with T.no_grad():
            q1_new_policy = self.critic_local_1_nn(state)
            q2_new_policy = self.critic_local_2_nn(state)
            critic_value = T.min(q1_new_policy, q2_new_policy)

        # CHANGE0005 Objective for policy
        actor_loss = action_probabilities * (
            Hyper.alpha * log_action_probabilities - critic_value)
        actor_loss = T.mean(actor_loss)
        self.actor_nn.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor_nn.optimizer.step()

        self.critic_local_1_nn.optimizer.zero_grad()
        self.critic_local_2_nn.optimizer.zero_grad()
        q_hat = Hyper.reward_scale * reward + Hyper.gamma * new_value_from_nn
        action_logits1 = self.critic_local_1_nn(state)
        q1_old_policy = T.argmax(action_logits1, dim=1, keepdim=True).view(-1)
        action_logits2 = self.critic_local_2_nn(state)
        q2_old_policy = T.argmax(action_logits2, dim=1, keepdim=True).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_local_1_nn.optimizer.step()
        self.critic_local_2_nn.optimizer.step()
        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = Hyper.tau

        target_value_params = self.target_value_nn.named_parameters()
        value_params = self.value_nn.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)
        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() + \
                    (1-tau)*target_value_state_dict[name].clone()

        self.target_value_nn.load_state_dict(value_state_dict)

        self.update_network_parameters_line(
            self.critic_target_1_nn.named_parameters(),
            self.critic_local_1_nn.named_parameters(), tau)
        self.update_network_parameters_line(
            self.critic_target_2_nn.named_parameters(),
            self.critic_local_2_nn.named_parameters(), tau)

    def update_network_parameters_line(self, target_params, local_params, tau):
        for target_param, local_param in zip(target_params, local_params):
            target_param[1].data.copy_(tau * local_param[1].data +
                                       (1.0 - tau) * target_param[1].data)

    def save_models(self):
        print('.... saving models ....')
        self.actor_nn.save_checkpoint()
        self.value_nn.save_checkpoint()
        self.target_value_nn.save_checkpoint()
        self.critic_local_1_nn.save_checkpoint()
        self.critic_local_2_nn.save_checkpoint()
        self.critic_target_1_nn.save_checkpoint()
        self.critic_target_2_nn.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor_nn.load_checkpoint()
        self.value_nn.load_checkpoint()
        self.target_value_nn.load_checkpoint()
        self.critic_local_1_nn.load_checkpoint()
        self.critic_local_2_nn.load_checkpoint()
        self.critic_target_1_nn.load_checkpoint()
        self.critic_target_2_nn.load_checkpoint()
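If the critics in the example above output one Q-value per discrete action, the Q-value of the stored action is normally read out with gather; T.argmax returns the index of the largest entry, not its value, and that index is what ends up in the MSE above. A small standalone illustration with toy tensors (no project code is assumed):

import torch

q_all = torch.tensor([[1.0, 2.0, 3.0],
                      [4.0, 5.0, 6.0]])              # (batch=2, n_actions=3)
action = torch.tensor([[2], [0]])                    # indices of the actions taken
q_taken = q_all.gather(1, action)                    # tensor([[3.], [4.]]) -- Q of the taken action
q_index = torch.argmax(q_all, dim=1, keepdim=True)   # tensor([[2], [2]])   -- indices, not values
print(q_taken, q_index)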
Example #13
File: SAC.py  Project: bhargavCSSE/adv-RL
class Agent():
    def __init__(self, alpha=0.0003, beta= 0.0003, input_dims=[8], env=None, 
                gamma=0.99, n_actions=2, max_size=1000000, tau=0.005, 
                ent_alpha = 0.0001, batch_size=256, reward_scale=2, 
                layer1_size=256, layer2_size=256, chkpt_dir='tmp/sac'):
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.ent_alpha = ent_alpha
        self.reward_scale = reward_scale

        self.actor = ActorNetwork(alpha, input_dims, n_actions=n_actions, 
                                fc1_dims=layer1_size, fc2_dims=layer2_size ,
                                name='actor', chkpt_dir=chkpt_dir)

        self.critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions, 
                                fc1_dims=layer1_size, fc2_dims=layer2_size ,name='critic_1',
                                chkpt_dir=chkpt_dir)
        self.critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions, 
                                fc1_dims=layer1_size, fc2_dims=layer2_size ,name='critic_2',
                                chkpt_dir=chkpt_dir)
        self.target_critic_1 = CriticNetwork(beta, input_dims, n_actions=n_actions, 
                                fc1_dims=layer1_size, fc2_dims=layer2_size ,name='target_critic_1',
                                chkpt_dir=chkpt_dir)
        self.target_critic_2 = CriticNetwork(beta, input_dims, n_actions=n_actions, 
                                fc1_dims=layer1_size, fc2_dims=layer2_size ,name='target_critic_2',
                                chkpt_dir=chkpt_dir)


        self.update_network_parameters(tau=1)

    def choose_actions(self, observation, learn_mode=False):
        if not learn_mode:
            state = T.Tensor([observation]).to(self.actor.device)
        else:
            state = observation

        action_probs = self.actor.forward(state)
        max_probability_action = T.argmax(action_probs, dim=-1)
        action_distribution = Categorical(action_probs)
        action = action_distribution.sample().cpu().detach().numpy()[0]

        z = action_probs == 0.0
        z = z.float()*1e-8
        log_probs = T.log(action_probs + z)

        return action, action_probs, log_probs, max_probability_action

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)
    
    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau
        
        target_critic_1_params = self.target_critic_1.named_parameters()
        target_critic_2_params = self.target_critic_2.named_parameters()
        critic_1_params = self.critic_1.named_parameters()
        critic_2_params = self.critic_2.named_parameters()

        target_critic_1_state_dict = dict(target_critic_1_params)
        critic_1_state_dict = dict(critic_1_params)

        target_critic_2_state_dict = dict(target_critic_2_params)
        critic_2_state_dict = dict(critic_2_params)

        for name in critic_1_state_dict:
            critic_1_state_dict[name] = tau*critic_1_state_dict[name].clone() + (1-tau)*target_critic_1_state_dict[name].clone()
        
        for name in critic_2_state_dict:
            critic_2_state_dict[name] = tau*critic_2_state_dict[name].clone() + (1-tau)*target_critic_2_state_dict[name].clone()
        
        self.target_critic_1.load_state_dict(critic_1_state_dict)
        self.target_critic_2.load_state_dict(critic_2_state_dict)
    
    def save_models(self):
        # print('.....saving models.....')
        self.actor.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()
        self.target_critic_1.save_checkpoint()
        self.target_critic_2.save_checkpoint()
    
    def load_models(self):
        print('.....loading models.....')
        self.actor.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()
        self.target_critic_1.load_checkpoint()
        self.target_critic_2.load_checkpoint()
    
    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return 10, 10, 10, 10, 10
        
        state, action, reward, next_state, done = self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done, dtype=T.float).to(self.actor.device)
        state_ = T.tensor(next_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)
        
        # Critics Learning

        with T.no_grad():
            action_, probs, log_probs, max_action = self.choose_actions(state_, learn_mode=True)
            qf1_target_ = self.target_critic_1(state_)
            qf2_target_ = self.target_critic_2(state_)
            min_qf_target_ = probs*(T.min(qf1_target_, qf2_target_) - self.ent_alpha*log_probs)
            min_qf_target_ = min_qf_target_.sum(dim=1).view(-1)
            next_q_value = reward + (1.0-done)*self.gamma*min_qf_target_
        
        action = action.view(self.batch_size, 1)
        # Flatten to (batch,) so the MSE below compares like shapes with next_q_value
        qf1 = self.critic_1(state).gather(1, action.long()).view(-1)
        qf2 = self.critic_2(state).gather(1, action.long()).view(-1)

        self.critic_1.optimizer.zero_grad()
        qf1_loss = F.mse_loss(qf1, next_q_value)
        qf1_loss.backward(retain_graph=False)
        self.critic_1.optimizer.step()

        self.critic_2.optimizer.zero_grad()
        qf2_loss = F.mse_loss(qf2, next_q_value)
        qf2_loss.backward(retain_graph=False)
        self.critic_2.optimizer.step()

        self.update_network_parameters()

        # Actor loss: re-evaluate the policy on the current states outside the
        # no_grad block so the probabilities carry gradients back to the actor.
        action_, probs, log_probs, max_action = self.choose_actions(state, learn_mode=True)
        qf1_pi = self.critic_1(state)
        qf2_pi = self.critic_2(state)
        min_qf_pi = T.min(qf1_pi, qf2_pi)
        inside_term = self.ent_alpha*log_probs - min_qf_pi
        actor_loss = (probs*inside_term).sum(dim=1).mean()
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=False)
        self.actor.optimizer.step()

        
    def compute_grads(self):
        if self.memory.mem_cntr < self.batch_size:
            return False
        
        state, action, reward, next_state, done = self.memory.sample_buffer(self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done, dtype=T.float).to(self.actor.device)
        state_ = T.tensor(next_state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        state.requires_grad = True

        with T.no_grad():
            action_, probs, log_probs, max_action = self.choose_actions(state_, learn_mode=True)
            qf1_target_ = self.target_critic_1(state_)
            qf2_target_ = self.target_critic_2(state_)
            min_qf_target_ = probs*(T.min(qf1_target_, qf2_target_) - self.ent_alpha*log_probs)
            min_qf_target_ = min_qf_target_.sum(dim=1).view(-1)
            next_q_value = reward + (1.0-done)*self.gamma*min_qf_target_

        self.actor.optimizer.zero_grad()
        qf1_pi = self.critic_1(state)
        qf2_pi = self.critic_2(state)
        min_qf_pi = T.min(qf1_pi, qf2_pi)
        inside_term = self.ent_alpha*log_probs - min_qf_pi
        actor_loss = (probs*inside_term).sum(dim=1).mean()

        actor_loss.backward()
        data_grad = state.grad.data

        return data_grad.mean(axis=0)
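compute_grads above returns the mean gradient of the actor objective with respect to the input states. Given the project name (adv-RL), one plausible use is an FGSM-style perturbation of observations; the helper below is a hedged sketch in that spirit, where the function name, epsilon, and the clamp range are assumptions rather than project code:

import torch

def fgsm_perturb(state, data_grad, epsilon=0.01):
    # Step the observation in the direction that increases the actor loss,
    # then clamp to an assumed valid observation range of [-1, 1].
    perturbed = state + epsilon * data_grad.sign()
    return torch.clamp(perturbed, -1.0, 1.0)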