Example #1
import torch as T
import torch.nn.functional as F

# Assumes ReplayBuffer, ActorNetwork, CriticNetwork and ValueNetwork are
# defined in companion modules that are not shown in this snippet.


class Agent():
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 env_id,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 layer_1_size=256,
                 layer_2_size=256,
                 batch_size=100,
                 reward_scale=2):

        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)

        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  n_actions=n_actions,
                                  name=env_id + '_actor',
                                  max_action=env.action_space.high)

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_1')

        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_2')

        self.value = ValueNetwork(beta,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  name=env_id + '_value')

        self.target_value = ValueNetwork(beta,
                                         input_dims,
                                         layer_1_size,
                                         layer_2_size,
                                         name=env_id + '_target_value')

        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, state_, done):
        self.memory.store_transitions(state, action, reward, state_, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        value_params = self.value.named_parameters()
        target_value_params = self.target_value.named_parameters()

        value_state_dict = dict(value_params)
        target_value_state_dict = dict(target_value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() \
                + (1-tau)*target_value_state_dict[name].clone()

        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, state_, done =\
            self.memory.sample_buffer(self.batch_size)

        state = T.tensor(state, dtype=T.float).to(self.critic_1.device)
        state_ = T.tensor(state_, dtype=T.float).to(self.critic_1.device)
        action = T.tensor(action, dtype=T.float).to(self.critic_1.device)
        reward = T.tensor(reward, dtype=T.float).to(self.critic_1.device)
        done = T.tensor(done).to(self.critic_1.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(state, actions)
        q2_new_policy = self.critic_2(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(state, actions)
        q2_new_policy = self.critic_2(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic_1(state, action).view(-1)
        q2_old_policy = self.critic_2(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
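
For context, here is a minimal training-loop sketch for the Agent from Example #1. It assumes the classic Gym API (env.reset() returning only the observation and env.step() returning a 4-tuple); the environment id, learning rates and episode count are illustrative placeholders, not values taken from the original snippet.

import gym
import numpy as np

env_id = 'LunarLanderContinuous-v2'          # placeholder environment
env = gym.make(env_id)
agent = Agent(alpha=0.0003, beta=0.0003,
              input_dims=env.observation_space.shape,
              tau=0.005, env=env, env_id=env_id,
              n_actions=env.action_space.shape[0])

score_history = []
for episode in range(250):
    observation = env.reset()
    done = False
    score = 0
    while not done:
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(action)
        agent.remember(observation, action, reward, observation_, done)
        agent.learn()                        # one gradient step per environment step
        score += reward
        observation = observation_
    score_history.append(score)
    print(f'episode {episode}  score {score:.1f}  '
          f'avg(last 100) {np.mean(score_history[-100:]):.1f}')
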
Example #2
import torch
import torch.nn.functional as F

# Assumes ReplayBuffer, ActorNetwork, CriticNetwork and ValueNetwork are
# defined in companion modules that are not shown in this snippet.


class Agent():
    def __init__(self, alpha = 0.0003, beta = 0.0003, input_dims = [8],
                 env = None, gamma = 0.99, tau = 0.005, n_actions = 2, max_size = 1000000,
                 layer1_size = 256, layer2_size = 256, batch_size = 256, reward_scale = 2):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale

        self.memory = ReplayBuffer(max_size, input_dims, n_actions = n_actions)
        self.actor = ActorNetwork(alpha, input_dims, n_actions = n_actions, max_action = env.action_space.high)
        self.critic1 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic1')
        self.critic2 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic2')
        self.value = ValueNetwork(beta, input_dims, name = 'value')
        self.target_value = ValueNetwork(beta, input_dims, name = 'target')
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        self.update_network_params(tau = 1)

    def choose_action(self, obs):
        state = torch.tensor([obs],dtype=torch.float32).to(self.device)
        actions, _ = self.actor.sample_normal(state, reparam = False)

        return actions.cpu().detach().numpy()[0]

    def store_trans(self, state, action, reward, new_state, done):
        self.memory.store_trans(state, action, reward, new_state, done)

    def update_network_params(self, tau = None):
        if tau is None:
            tau = self.tau
        
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict.keys():
            value_state_dict[name] = tau * value_state_dict[name].clone() + \
                (1 - tau) * target_value_state_dict[name].clone()
            
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic1.save_checkpoint()
        self.critic2.save_checkpoint()
        print('saving models')

    def load_models(self):
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic1.load_checkpoint()
        self.critic2.load_checkpoint()
        print('loading models')

    def get_critic_val_log_prob(self, state, reparam):
        actions, log_probs = self.actor.sample_normal(state, reparam = reparam)
        log_probs = log_probs.view(-1)
        q1_new = self.critic1(state, actions)
        q2_new = self.critic2(state, actions)
        critic_value = torch.min(q1_new, q2_new)
        critic_value = critic_value.view(-1)

        return log_probs, critic_value

    def learn(self):
        if self.memory.mem_counter < self.batch_size:
            return
        
        state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)

        reward = torch.tensor(reward, dtype=torch.float).to(self.actor.device)
        done = torch.tensor(done).to(self.actor.device)
        state_ = torch.tensor(new_state, dtype=torch.float).to(self.actor.device)
        state = torch.tensor(state, dtype=torch.float).to(self.actor.device)
        action = torch.tensor(action, dtype=torch.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state, reparam=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state, reparam=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        
        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        q_hat = self.scale*reward + self.gamma*value_
        q1_old_policy = self.critic1.forward(state, action).view(-1)
        q2_old_policy = self.critic2.forward(state, action).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        self.update_network_params()
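
In each learn() above, value_[done] = 0.0 zeroes the target network's value estimate for terminal transitions before the critic target q_hat = scale * reward + gamma * value_ is formed, so no bootstrapping happens past the end of an episode. A tiny self-contained illustration with made-up numbers (batch of four transitions, reward scale 2 and gamma 0.99 as in the defaults):

import torch

reward = torch.tensor([1.0, 0.5, -1.0, 2.0])         # arbitrary example rewards
value_ = torch.tensor([10.0, 8.0, 6.0, 4.0])         # V_target(s') from the target network
done = torch.tensor([False, True, False, True])

value_[done] = 0.0                                   # no bootstrapping past terminal states
q_hat = 2.0 * reward + 0.99 * value_                 # scale * r + gamma * V_target(s')
print(q_hat)                                         # tensor([11.9000, 1.0000, 3.9400, 4.0000])
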
Example #3
import torch as T
import torch.nn.functional as F

# Assumes ReplayBuffer, ActorNetwork, CriticNetwork and ValueNetwork are
# defined in companion modules that are not shown in this snippet.


class Agent():
    def __init__(self,
                 alpha=0.0003,
                 beta=.0003,
                 input_dims=[8],
                 env=None,
                 gamma=.99,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=256,
                 layer2_size=256,
                 tau=.005,
                 batch_size=256,
                 reward_scale=2):
        # the reward scale depends on the action convention for the environment
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        # set up the actor, critic and value networks
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  max_action=env.action_space.high,
                                  n_actions=n_actions,
                                  name='actor')

        self.critic1 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_1')
        self.critic2 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_2')

        self.value = ValueNetwork(beta, input_dims, name='value')
        # target value
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')
        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # convert the observation into a tensor on the actor's device
        state = T.tensor([observation]).to(self.actor.device).float()
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau * value_state_dict[name].clone() + (
                1 - tau) * target_value_state_dict[name].clone()
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print("saving models:")
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic1.save_checkpoint()
        self.critic2.save_checkpoint()

    def load_models(self):
        print("loading models:")
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic1.load_checkpoint()
        self.critic2.load_checkpoint()

    def learn(self):
        # don't learn until the replay buffer holds at least one full batch
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = self.memory.sample_buffer(
            self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = .5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic1.forward(state, action).view(-1)
        q2_old_policy = self.critic2.forward(state, action).view(-1)
        critic_1_loss = .5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = .5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        self.update_network_parameters()
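
All three examples perform the target-value update by blending the named_parameters() of the online and target value networks and then calling load_state_dict(); passing tau=1 from the constructor hard-copies the online weights into the target network. For reference, the same Polyak update can be written as a compact in-place sketch (illustrative only, not part of the original snippets):

import torch

def soft_update(online_net, target_net, tau):
    # Polyak averaging: target <- tau * online + (1 - tau) * target.
    # With tau=1 this is a hard copy, which is what the constructors above rely on.
    with torch.no_grad():
        for online_param, target_param in zip(online_net.parameters(),
                                              target_net.parameters()):
            target_param.mul_(1.0 - tau).add_(tau * online_param)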