Example #1
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 env_id,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 layer_1_size=256,
                 layer_2_size=256,
                 batch_size=100,
                 reward_scale=2):

        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)

        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  n_actions=n_actions,
                                  name=env_id + '_actor',
                                  max_action=env.action_space.high)

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_1')

        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_2')

        self.value = ValueNetwork(beta,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  name=env_id + '_value')

        self.target_value = ValueNetwork(beta,
                                         input_dims,
                                         layer_1_size,
                                         layer_2_size,
                                         name=env_id + '_target_value')

        self.update_network_parameters(tau=1)
Example #2
    def __init__(self, alpha = 0.0003, beta = 0.0003, input_dims = [8],
                 env = None, gamma = 0.99, tau = 0.005, n_actions = 2, max_size = 1000000,
                 layer1_size = 256, layer2_size = 256, batch_size = 256, reward_scale = 2):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale

        self.memory = ReplayBuffer(max_size, input_dims, n_actions = n_actions)
        self.actor = ActorNetwork(alpha, input_dims, n_actions = n_actions, max_action = env.action_space.high)
        self.critic1 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic1')
        self.critic2 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic2')
        self.value = ValueNetwork(beta, input_dims, name = 'value')
        self.target_value = ValueNetwork(beta, input_dims, name = 'target')
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        self.update_network_params(tau = 1)
Example #3
    def __init__(self, input_size, output_size, hidden = 256, lr_actor=1.0e-3, lr_critic=1.0e-3, agent_number=0, tau=1.0e-2,
                 gamma=0.99, epsilon=1.0, epsilon_decay=0.99, weight_decay=0, clipgrad=.1, seed = 42):
        super(Agent, self).__init__()

        self.seed = seed
        self.actor         = ActorNetwork(input_size, output_size, name=f"Actor_Agent{agent_number}").to(device)
        self.critic        = CriticNetwork(input_size, output_size, name=f"Critic_Agent{agent_number}").to(device)
        self.target_actor  = ActorNetwork(input_size, output_size, name=f"Actor_Target_Agent{agent_number}").to(device)
        self.target_critic = CriticNetwork(input_size, output_size, name=f"Critic_Target_Agent{agent_number}").to(device)

        self.noise = OUActionNoise(mu=np.zeros(output_size))
        self.tau = tau
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.gamma = gamma
        self.clipgrad = clipgrad

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
Example #4
    def __init__(self,
                 alpha=0.0003,
                 beta=.0003,
                 input_dims=[8],
                 env=None,
                 gamma=.99,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=256,
                 layer2_size=256,
                 tau=.005,
                 batch_size=256,
                 reward_scale=2):
        # reward scale depends on the action convention of the environment
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        # set up classes
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  max_action=env.action_space.high,
                                  n_actions=n_actions,
                                  name='actor')

        self.critic1 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_1')
        self.critic2 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_2')

        self.value = ValueNetwork(beta, input_dims, name='value')
        # target value
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')
        self.scale = reward_scale
        self.update_network_parameters(tau=1)
Example #5
    def __init__(self, environment, initial_hyper_parameters, id,
                 log_file_name):
        self.environment_name = environment
        self.environment = gym.make('{}'.format(self.environment_name))
        self.episode_finished = False
        self.log_file_name = log_file_name
        self.state = self.environment.reset()
        self.next_state = []
        self.cum_sum = 0
        self.episode_num = 0
        self.episode_rewards = []
        self.id = id

        self.hyper_parameters = initial_hyper_parameters
        # these are the parameters we want to use with population based training
        self.actor_learning_rate = self.hyper_parameters['actor_learning_rate']
        self.critic_learning_rate = self.hyper_parameters[
            'critic_learning_rate']
        self.discount_factor = self.hyper_parameters['discount_factor']
        # We're going to use one network for all of our minions
        self.actor_network = ActorNetwork(observation_dims=4,
                                          output_dims=2,
                                          name=f'Agent {self.id} Actor')
        self.critic_network = CriticNetwork(observation_dims=4,
                                            output_dims=1,
                                            name=f'Agent {self.id} Critic')
        self.actor_network.compile(optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=self.actor_learning_rate))
        self.critic_network.compile(optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=self.critic_learning_rate))
        # Since Actor-Critic is an on-policy method, we will not use a replay buffer
        self.states = []
        self.actions = []
        self.next_states = []
        self.rewards = []
        self.dones = []
        self.losses = []
        self.scores = []
Example #6
class Agent():
    def __init__(self, alpha = 0.0003, beta = 0.0003, input_dims = [8],
                 env = None, gamma = 0.99, tau = 0.005, n_actions = 2, max_size = 1000000,
                 layer1_size = 256, layer2_size = 256, batch_size = 256, reward_scale = 2):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale

        self.memory = ReplayBuffer(max_size, input_dims, n_actions = n_actions)
        self.actor = ActorNetwork(alpha, input_dims, n_actions = n_actions, max_action = env.action_space.high)
        self.critic1 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic1')
        self.critic2 = CriticNetwork(beta, input_dims, n_actions = n_actions, name = 'critic2')
        self.value = ValueNetwork(beta, input_dims, name = 'value')
        self.target_value = ValueNetwork(beta, input_dims, name = 'target')
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        self.update_network_params(tau = 1)

    def choose_action(self, obs):
        state = torch.tensor([obs],dtype=torch.float32).to(self.device)
        actions, _ = self.actor.sample_normal(state, reparam = False)

        return actions.cpu().detach().numpy()[0]

    def store_trans(self, state, action, reward, new_state, done):
        self.memory.store_trans(state, action, reward, new_state, done)

    def update_network_params(self, tau = None):
        if tau is None:
            tau = self.tau
        
        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict.keys():
            value_state_dict[name] = tau * value_state_dict[name].clone() + \
                (1 - tau) * target_value_state_dict[name].clone()
            
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic1.save_checkpoint()
        self.critic2.save_checkpoint()
        print('saving models')
    def load_models(self):
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic1.load_checkpoint()
        self.critic2.load_checkpoint()
        print('loading models')

    def get_critic_val_log_prob(self, state, reparam):
        actions, log_probs = self.actor.sample_normal(state, reparam = reparam)
        log_probs = log_probs.view(-1)
        q1_new = self.critic1(state, actions)
        q2_new = self.critic2(state, actions)
        critic_value = torch.min(q1_new, q2_new)
        critic_value = critic_value.view(-1)

        return log_probs, critic_value

    def learn(self):
        if self.memory.mem_counter < self.batch_size:
            return
        
        state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)

        reward = torch.tensor(reward, dtype=torch.float).to(self.actor.device)
        done = torch.tensor(done).to(self.actor.device)
        state_ = torch.tensor(new_state, dtype=torch.float).to(self.actor.device)
        state = torch.tensor(state, dtype=torch.float).to(self.actor.device)
        action = torch.tensor(action, dtype=torch.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state, reparam=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        # Value loss: regress V(s) toward E[min(Q1, Q2) - log pi(a|s)]
        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        # Actor loss: E[log pi(a|s) - min(Q1, Q2)], using reparameterized samples
        actions, log_probs = self.actor.sample_normal(state, reparam=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = torch.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)
        
        actor_loss = log_probs - critic_value
        actor_loss = torch.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        # Critic loss: regress both Q networks toward q_hat = scale * r + gamma * V_target(s')
        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        q_hat = self.scale*reward + self.gamma*value_
        q1_old_policy = self.critic1.forward(state, action).view(-1)
        q2_old_policy = self.critic2.forward(state, action).view(-1)
        critic1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic1_loss + critic2_loss
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        self.update_network_params()
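
A minimal usage sketch for the agent above, assuming the classic Gym API (reset() returns the observation, step() returns a 4-tuple) and that the supporting network and buffer classes are importable as in the snippet. The environment id, episode budget, and the gym import are illustrative assumptions, not part of the original example.

import gym

# Hypothetical training loop for the Agent class above; the environment id
# and the episode count are assumed values chosen for illustration.
env = gym.make('LunarLanderContinuous-v2')
agent = Agent(input_dims=env.observation_space.shape,
              env=env,
              n_actions=env.action_space.shape[0])

for episode in range(250):
    obs = env.reset()
    done, score = False, 0.0
    while not done:
        action = agent.choose_action(obs)
        obs_, reward, done, info = env.step(action)
        agent.store_trans(obs, action, reward, obs_, done)
        agent.learn()  # returns early until the buffer holds one full batch
        score += reward
        obs = obs_
    print(f'episode {episode} score {score:.1f}')
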
Example #7
class Agent:
    def __init__(self, environment, initial_hyper_parameters, id,
                 log_file_name):
        self.environment_name = environment
        self.environment = gym.make('{}'.format(self.environment_name))
        self.episode_finished = False
        self.log_file_name = log_file_name
        self.state = self.environment.reset()
        self.next_state = []
        self.cum_sum = 0
        self.episode_num = 0
        self.episode_rewards = []
        self.id = id

        self.hyper_parameters = initial_hyper_parameters
        # these are the parameters we want to use with population based training
        self.actor_learning_rate = self.hyper_parameters['actor_learning_rate']
        self.critic_learning_rate = self.hyper_parameters[
            'critic_learning_rate']
        self.discount_factor = self.hyper_parameters['discount_factor']
        # We're going to use one network for all of our minions
        self.actor_network = ActorNetwork(observation_dims=4,
                                          output_dims=2,
                                          name=f'Agent {self.id} Actor')
        self.critic_network = CriticNetwork(observation_dims=4,
                                            output_dims=1,
                                            name=f'Agent {self.id} Critic')
        self.actor_network.compile(optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=self.actor_learning_rate))
        self.critic_network.compile(optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=self.critic_learning_rate))
        # Since Actor-Critic is an on-policy method, we will not use a replay buffer
        self.states = []
        self.actions = []
        self.next_states = []
        self.rewards = []
        self.dones = []
        self.losses = []
        self.scores = []

    def save_models(self):
        # print('... saving models ...')
        self.actor_network.save_weights(self.actor_network.checkpoint_file)
        self.critic_network.save_weights(self.critic_network.checkpoint_file)

    def load_models(self):
        # print('... loading models ...')
        self.actor_network.load_weights(self.actor_network.checkpoint_file)
        self.critic_network.load_weights(self.critic_network.checkpoint_file)

    def choose_action(self, state):
        action_logits = self.actor_network(tf.convert_to_tensor([state]))
        action_probabilities = tf.nn.softmax(action_logits)
        action_distribution = tfp.distributions.Categorical(
            probs=action_probabilities, dtype=tf.float32)
        action = action_distribution.sample()

        return int(action.numpy()[0])

    def play(self):
        done = False
        while not done:
            action_to_take = self.choose_action(self.state)
            next_state, reward, done, _ = self.environment.step(action_to_take)
            # self.environment.render()
            self.cum_sum += reward
            self.learn(self.state, action_to_take, reward, next_state, done)
            self.state = next_state

        if done:
            self.state = self.environment.reset()
            self.episode_num += 1
            self.episode_rewards.append(self.cum_sum)
            self.scores.append(self.cum_sum)

            f = open(f'{self.environment_name}-{self.log_file_name}.csv', 'a')
            f.write(f'{self.id},'
                    f'{self.episode_num},'
                    f'{self.cum_sum},'
                    f'{self.actor_network.optimizer.learning_rate.numpy()},'
                    f'{self.critic_network.optimizer.learning_rate.numpy()}\n')
            f.close()

            if self.episode_num % 50 == 0:
                print(self.id, ' -> ', self.episode_num, ' -> ',
                      np.mean(self.episode_rewards))
                self.episode_rewards.clear()

            self.cum_sum = 0

    def learn(self, state, action, reward, next_state, done):
        with tf.GradientTape() as tape1, tf.GradientTape() as tape2:
            # Start calculating the Actor and Critic losses for each minion's experience
            action_logits = self.actor_network(tf.convert_to_tensor([state]))
            state_values = self.critic_network(tf.convert_to_tensor([state]))
            next_state_values = self.critic_network(
                tf.convert_to_tensor([next_state]))
            action_probabilities = tf.nn.softmax(action_logits)
            # We'll be using an advantage function
            action_distributions = tfp.distributions.Categorical(
                probs=action_probabilities, dtype=tf.float32)
            log_probs = action_distributions.log_prob(action)
            advantage = reward + self.discount_factor * next_state_values * (
                1 - int(done)) - state_values
            entropy = -1 * tf.math.reduce_sum(
                action_probabilities * tf.math.log(action_probabilities))
            actor_loss = -log_probs * advantage - self.hyper_parameters[
                'entropy_coefficient'] * entropy
            critic_loss = advantage**2

        # Apply the gradients to this agent's actor and critic networks
        actor_grads = tape1.gradient(
            actor_loss, self.actor_network.trainable_variables)
        critic_grads = tape2.gradient(
            critic_loss, self.critic_network.trainable_variables)
        self.actor_network.optimizer.apply_gradients(
            zip(actor_grads, self.actor_network.trainable_variables))
        self.critic_network.optimizer.apply_gradients(
            zip(critic_grads, self.critic_network.trainable_variables))
        self.losses.append(actor_loss.numpy())
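
A hedged sketch of how this agent might be driven. The hyperparameter values, the 'CartPole-v1' environment id (which does match the hard-coded observation_dims=4 and output_dims=2), the episode budget, and the log file name are all assumptions made for illustration.

# Illustrative driver for the Agent class above; every literal below is assumed.
hyper_parameters = {
    'actor_learning_rate': 1e-3,
    'critic_learning_rate': 5e-3,
    'discount_factor': 0.99,
    'entropy_coefficient': 0.01,
}

agent = Agent(environment='CartPole-v1',
              initial_hyper_parameters=hyper_parameters,
              id=0,
              log_file_name='pbt_run')

for _ in range(500):  # each call to play() runs one full episode and logs it
    agent.play()
agent.save_models()
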
Example #8
    def __init__(self,
                 state_size,
                 action_size,
                 lr_actor=LR_ACTOR,
                 lr_critic=LR_CRITIC,
                 random_seed=42,
                 num_agents=1):
        """Initialize Agent object.
        
        Params
        ====
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            lr_actor (float): Learning rate for actor model
            lr_critic (float): Learning Rate for critic model
            random_seed (int): Random seed
            num_agents (int): Number of agents
            
        return 
        ====
            None
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.num_agents = num_agents

        # Initialize time step (for updating every hyperparameters["update_every"] steps)
        self.t_step = 0

        # Actor network
        self.actor = ActorNetwork(lr_actor,
                                  state_size,
                                  action_size,
                                  random_seed,
                                  name="actor")
        self.actor_target = ActorNetwork(lr_actor,
                                         state_size,
                                         action_size,
                                         random_seed,
                                         name="actor_target")

        self.soft_update(self.actor, self.actor_target, tau=1)

        # Critic network
        self.critic = CriticNetwork(lr_critic,
                                    state_size,
                                    action_size,
                                    random_seed,
                                    name="critic")
        self.critic_target = CriticNetwork(lr_critic,
                                           state_size,
                                           action_size,
                                           random_seed,
                                           name="critic_target")

        self.soft_update(self.critic, self.critic_target, tau=1)

        # Noise process
        self.noise = OUActionNoise(mu=np.zeros(action_size))

        # Replay buffer memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE,
                                   random_seed)
Example #9
class Agent():
    """Interacts with and learns from the environment."""
    def __init__(self,
                 state_size,
                 action_size,
                 lr_actor=LR_ACTOR,
                 lr_critic=LR_CRITIC,
                 random_seed=42,
                 num_agents=1):
        """Initialize Agent object.
        
        Params
        ====
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            lr_actor (float): Learning rate for actor model
            lr_critic (float): Learning Rate for critic model
            random_seed (int): Random seed
            num_agents (int): Number of agents
            
        return 
        ====
            None
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.num_agents = num_agents

        # Initialize time step (for updating every hyperparameters["update_every"] steps)
        self.t_step = 0

        # Actor network
        self.actor = ActorNetwork(lr_actor,
                                  state_size,
                                  action_size,
                                  random_seed,
                                  name="actor")
        self.actor_target = ActorNetwork(lr_actor,
                                         state_size,
                                         action_size,
                                         random_seed,
                                         name="actor_target")

        self.soft_update(self.actor, self.actor_target, tau=1)

        # Critic network
        self.critic = CriticNetwork(lr_critic,
                                    state_size,
                                    action_size,
                                    random_seed,
                                    name="critic")
        self.critic_target = CriticNetwork(lr_critic,
                                           state_size,
                                           action_size,
                                           random_seed,
                                           name="critic_target")

        self.soft_update(self.critic, self.critic_target, tau=1)

        # Noise process
        self.noise = OUActionNoise(mu=np.zeros(action_size))

        # Replay buffer memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE,
                                   random_seed)

    def step(self, states, actions, rewards, next_states, dones):
        """Save experience in replay memory, and use random sample from buffer to learn."""
        # Save experience / reward
        # Support for multi agents learners
        for state, action, reward, next_state, done in zip(
                states, actions, rewards, next_states, dones):
            self.memory.add(state, action, reward, next_state, done)
        # Update timestep to learn
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        # Learn, if enough samples are available in memory
        if len(self.memory) > BATCH_SIZE and self.t_step == 0:
            experiences = self.memory.sample()
            self.learn(experiences, GAMMA)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        states = T.from_numpy(state).float().to(device)
        self.actor.eval()
        with T.no_grad():
            actions = self.actor(states).cpu().data.numpy()
        self.actor.train()

        if add_noise:
            actions += self.noise.sample()
        return np.clip(actions, -1, 1)

    def reset(self):
        self.noise.reset()

    def learn(self, experiences, gamma):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic.optimizer.zero_grad()
        critic_loss.backward()
        T.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic.optimizer.step()

        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor(states)
        actor_loss = -self.critic(states, actions_pred).mean()
        # Minimize the loss
        self.actor.optimizer.zero_grad()
        actor_loss.backward()
        self.actor.optimizer.step()

        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic, self.critic_target, TAU)
        self.soft_update(self.actor, self.actor_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter 
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)

    def save_models(self):
        """ Save models weights """
        self.actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.actor_target.save_checkpoint()
        self.critic_target.save_checkpoint()

    def load_models(self):
        """ Load models weights """
        self.actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.actor_target.load_checkpoint()
        self.critic_target.load_checkpoint()
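
A minimal sketch of an episode loop for this agent, assuming the classic Gym API and a continuous-action environment whose action range is [-1, 1] (act() clips to that range). The environment id, episode budget, and the gym import are assumptions; step() iterates over per-agent lists, so the single-agent transition is wrapped in length-1 lists.

import gym

# Hypothetical episode loop; environment id and episode count are assumed.
env = gym.make('MountainCarContinuous-v0')
agent = Agent(state_size=env.observation_space.shape[0],
              action_size=env.action_space.shape[0])

for episode in range(100):
    state = env.reset()
    agent.reset()  # reset the OU noise process at the start of each episode
    done, score = False, 0.0
    while not done:
        action = agent.act(state)
        next_state, reward, done, info = env.step(action)
        agent.step([state], [action], [reward], [next_state], [done])
        state = next_state
        score += reward
    print(f'episode {episode} score {score:.1f}')
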
Example #10
    def getActorNetwork(self):
        # self.ActorNetwork = ActorNetwork(self.config)
        return ActorNetwork(self.config)
Example #11
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 gamma=0.99,
                 update_actor_interval=2,
                 warmup=1000,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=400,
                 layer2_size=300,
                 batch_size=100,
                 noise=0.1):

        self.gamma = gamma
        self.tau = tau
        self.max_action = env.action_space.high
        self.min_action = env.action_space.low
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.learn_step_cntr = 0
        self.time_step = 0
        self.warmup = warmup
        self.n_actions = n_actions
        self.update_actor_interval = update_actor_interval

        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer1_size,
                                  layer2_size,
                                  n_actions,
                                  name='actor')

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions,
                                      name='critic_1')

        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions,
                                      name='critic_2')

        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         layer1_size,
                                         layer2_size,
                                         n_actions,
                                         name='target_actor')

        self.target_critic_1 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions,
                                             name='target_critic_1')

        self.target_critic_2 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions,
                                             name='target_critic_2')

        self.noise = noise
        self.update_network_parameters(tau=1)
Example #12
class Agent():
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 gamma=0.99,
                 update_actor_interval=2,
                 warmup=1000,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=400,
                 layer2_size=300,
                 batch_size=100,
                 noise=0.1):

        self.gamma = gamma
        self.tau = tau
        self.max_action = env.action_space.high
        self.min_action = env.action_space.low
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.learn_step_cntr = 0
        self.time_step = 0
        self.warmup = warmup
        self.n_actions = n_actions
        self.update_actor_interval = update_actor_interval

        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer1_size,
                                  layer2_size,
                                  n_actions,
                                  name='actor')

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions,
                                      name='critic_1')

        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer1_size,
                                      layer2_size,
                                      n_actions,
                                      name='critic_2')

        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         layer1_size,
                                         layer2_size,
                                         n_actions,
                                         name='target_actor')

        self.target_critic_1 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions,
                                             name='target_critic_1')

        self.target_critic_2 = CriticNetwork(beta,
                                             input_dims,
                                             layer1_size,
                                             layer2_size,
                                             n_actions,
                                             name='target_critic_2')

        self.noise = noise
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        if self.time_step < self.warmup:
            mu = T.tensor(np.random.normal(scale=self.noise,
                                           size=(self.n_actions,))).\
                                               to(self.actor.device)
        else:
            state = T.tensor(observation, dtype=T.float).to(self.actor.device)
            mu = self.actor(state).to(self.actor.device)
        mu_prime = mu + T.tensor(np.random.normal(scale=self.noise),
                                 dtype=T.float).to(self.actor.device)

        mu_prime = T.clamp(mu_prime, self.min_action[0], self.max_action[0])
        self.time_step += 1
        return mu_prime.cpu().detach().numpy()

    def remember(self, state, action, reward, state_, done):
        self.memory.store_transitions(state, action, reward, state_, done)

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, state_, done =\
            self.memory.sample_buffer(self.batch_size)

        state = T.tensor(state, dtype=T.float).to(self.critic_1.device)
        state_ = T.tensor(state_, dtype=T.float).to(self.critic_1.device)
        action = T.tensor(action, dtype=T.float).to(self.critic_1.device)
        reward = T.tensor(reward, dtype=T.float).to(self.critic_1.device)
        done = T.tensor(done).to(self.critic_1.device)

        # Target policy smoothing: perturb the target action with clipped noise
        target_actions = self.target_actor(state_)
        target_actions = target_actions +\
            T.clamp(T.tensor(np.random.normal(scale=0.2)), -0.5, 0.5)
        target_actions = T.clamp(target_actions, self.min_action[0],
                                 self.max_action[0])

        q1_ = self.target_critic_1(state_, target_actions)
        q2_ = self.target_critic_2(state_, target_actions)

        q1 = self.critic_1(state, action)
        q2 = self.critic_2(state, action)

        q1_[done] = 0.0
        q2_[done] = 0.0

        q1_ = q1_.view(-1)
        q2_ = q2_.view(-1)

        critic_value_ = T.min(q1_, q2_)

        target = reward + self.gamma * critic_value_
        target = target.view(self.batch_size, 1)

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q1_loss = F.mse_loss(target, q1)
        q2_loss = F.mse_loss(target, q2)
        critic_loss = q1_loss + q2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.learn_step_cntr += 1

        # Delayed policy updates: refresh the actor and the target networks
        # only every update_actor_interval critic updates
        if self.learn_step_cntr % self.update_actor_interval != 0:
            return

        self.actor.optimizer.zero_grad()
        actor_q1_loss = self.critic_1(state, self.actor(state))
        actor_loss = -T.mean(actor_q1_loss)
        actor_loss.backward()
        self.actor.optimizer.step()

        self.update_network_parameters()

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        actor_params = self.actor.named_parameters()
        critic_1_params = self.critic_1.named_parameters()
        critic_2_params = self.critic_2.named_parameters()
        target_actor_params = self.target_actor.named_parameters()
        target_critic_1_params = self.target_critic_1.named_parameters()
        target_critic_2_params = self.target_critic_2.named_parameters()

        critic_1_state_dict = dict(critic_1_params)
        critic_2_state_dict = dict(critic_2_params)
        actor_state_dict = dict(actor_params)
        target_critic_1_state_dict = dict(target_critic_1_params)
        target_critic_2_state_dict = dict(target_critic_2_params)
        target_actor_state_dict = dict(target_actor_params)

        for name in critic_1_state_dict:
            critic_1_state_dict[name] = tau*critic_1_state_dict[name].clone() \
                + (1-tau)*target_critic_1_state_dict[name].clone()

        for name in critic_2_state_dict:
            critic_2_state_dict[name] = tau*critic_2_state_dict[name].clone() \
                + (1-tau)*target_critic_2_state_dict[name].clone()

        for name in actor_state_dict:
            actor_state_dict[name] = tau*actor_state_dict[name].clone() + \
                                 (1-tau)*target_actor_state_dict[name].clone()

        self.target_critic_1.load_state_dict(critic_1_state_dict)
        self.target_critic_2.load_state_dict(critic_2_state_dict)
        self.target_actor.load_state_dict(actor_state_dict)

    def save_models(self):
        self.actor.save_checkpoints()
        self.critic_1.save_checkpoints()
        self.critic_2.save_checkpoints()
        self.target_actor.save_checkpoints()
        self.target_critic_1.save_checkpoints()
        self.target_critic_2.save_checkpoints()

    def load_models(self):
        self.actor.load_checkpoints()
        self.critic_1.load_checkpoints()
        self.critic_2.load_checkpoints()
        self.target_actor.load_checkpoints()
        self.target_critic_1.load_checkpoints()
        self.target_critic_2.load_checkpoints()
Example #13
class Agent():
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 env,
                 env_id,
                 gamma=0.99,
                 n_actions=2,
                 max_size=1000000,
                 layer_1_size=256,
                 layer_2_size=256,
                 batch_size=100,
                 reward_scale=2):

        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)

        self.batch_size = batch_size
        self.n_actions = n_actions
        self.scale = reward_scale
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  n_actions=n_actions,
                                  name=env_id + '_actor',
                                  max_action=env.action_space.high)

        self.critic_1 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_1')

        self.critic_2 = CriticNetwork(beta,
                                      input_dims,
                                      layer_1_size,
                                      layer_2_size,
                                      n_actions=n_actions,
                                      name=env_id + '_critic_2')

        self.value = ValueNetwork(beta,
                                  input_dims,
                                  layer_1_size,
                                  layer_2_size,
                                  name=env_id + '_value')

        self.target_value = ValueNetwork(beta,
                                         input_dims,
                                         layer_1_size,
                                         layer_2_size,
                                         name=env_id + '_target_value')

        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, state_, done):
        self.memory.store_transitions(state, action, reward, state_, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        value_params = self.value.named_parameters()
        target_value_params = self.target_value.named_parameters()

        value_state_dict = dict(value_params)
        target_value_state_dict = dict(target_value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau*value_state_dict[name].clone() \
                + (1-tau)*target_value_state_dict[name].clone()

        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print('.... saving models ....')
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic_1.save_checkpoint()
        self.critic_2.save_checkpoint()

    def load_models(self):
        print('.... loading models ....')
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic_1.load_checkpoint()
        self.critic_2.load_checkpoint()

    def learn(self):
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, state_, done =\
            self.memory.sample_buffer(self.batch_size)

        state = T.tensor(state, dtype=T.float).to(self.critic_1.device)
        state_ = T.tensor(state_, dtype=T.float).to(self.critic_1.device)
        action = T.tensor(action, dtype=T.float).to(self.critic_1.device)
        reward = T.tensor(reward, dtype=T.float).to(self.critic_1.device)
        done = T.tensor(done).to(self.critic_1.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(state, actions)
        q2_new_policy = self.critic_2(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = 0.5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic_1(state, actions)
        q2_new_policy = self.critic_2(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()

        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic_1(state, action).view(-1)
        q2_old_policy = self.critic_2(state, action).view(-1)
        critic_1_loss = 0.5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = 0.5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()

        self.update_network_parameters()
Example #14
class Agent:
    def __init__(self, input_size, output_size, hidden = 256, lr_actor=1.0e-3, lr_critic=1.0e-3, agent_number=0, tau=1.0e-2,
                 gamma=0.99, epsilon=1.0, epsilon_decay=0.99, weight_decay=0, clipgrad=.1, seed = 42):
        super(Agent, self).__init__()
        
        self.seed = seed
        self.actor         = ActorNetwork(input_size, output_size, name=f"Actor_Agent{agent_number}").to(device)
        self.critic        = CriticNetwork(input_size, output_size, name=f"Critic_Agent{agent_number}").to(device)
        self.target_actor  = ActorNetwork(input_size, output_size, name=f"Actor_Target_Agent{agent_number}").to(device)
        self.target_critic = CriticNetwork(input_size, output_size, name=f"Critic_Target_Agent{agent_number}").to(device)

        self.noise = OUActionNoise(mu=np.zeros(output_size))
        self.tau = tau
        self.epsilon = epsilon
        self.epsilon_decay=epsilon_decay
        self.gamma = gamma
        self.clipgrad = clipgrad
        
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
       

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device) #.unsqueeze(0)
        self.actor.eval()
        with torch.no_grad():
            action = self.actor(state).cpu().squeeze(0).data.numpy()

        self.actor.train()
        if add_noise:
            action += self.noise.sample() * self.epsilon
        return np.clip(action, -1, 1)
    
    
    def learn(self, experiences):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
        """
        states, actions, rewards, next_states, dones = experiences

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.target_actor(next_states.to(device))
        #set_trace()
        Q_targets_next = self.target_critic(next_states.to(device), actions_next.to(device))
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic(states, actions)
        critic_loss = f.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        clip_grad_norm_(self.critic.parameters(), self.clipgrad)
        self.critic_optimizer.step()

        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor(states)
        actor_loss = -self.critic(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        #clip_grad_norm_(self.actor.parameters(), self.clipgrad)
        self.actor_optimizer.step()

        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic, self.target_critic)
        self.soft_update(self.actor, self.target_actor)
        
        # ----------------------- update epsilon and noise ---------------------- #
        self.epsilon *= self.epsilon_decay
        self.noise.reset()
    


    def reset(self):
        self.noise.reset()
    
    def soft_update(self, local_model, target_model):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(self.tau*local_param.data + (1.0-self.tau)*target_param.data)
            
    def save_models(self):
        """ Save models weights """
        self.actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.target_critic.save_checkpoint()
        
    def load_models(self):
        """ Load models weights """
        self.actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.target_critic.load_checkpoint()            
Example #15
class Agent():
    def __init__(self,
                 alpha=0.0003,
                 beta=.0003,
                 input_dims=[8],
                 env=None,
                 gamma=.99,
                 n_actions=2,
                 max_size=1000000,
                 layer1_size=256,
                 layer2_size=256,
                 tau=.005,
                 batch_size=256,
                 reward_scale=2):
        # reward scale depends on the action convention of the environment
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        # set up classes
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  max_action=env.action_space.high,
                                  n_actions=n_actions,
                                  name='actor')

        self.critic1 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_1')
        self.critic2 = CriticNetwork(beta,
                                     input_dims,
                                     n_actions=n_actions,
                                     name='critic_2')

        self.value = ValueNetwork(beta, input_dims, name='value')
        # target value
        self.target_value = ValueNetwork(beta, input_dims, name='target_value')
        self.scale = reward_scale
        self.update_network_parameters(tau=1)

    def choose_action(self, observation):
        # convert the observation into a batched float tensor on the actor's device
        state = T.tensor([observation]).to(self.actor.device).float()
        # print(type(state))
        actions, _ = self.actor.sample_normal(state, reparameterize=False)
        return actions.cpu().detach().numpy()[0]

    def remember(self, state, action, reward, new_state, done):
        self.memory.store_transition(state, action, reward, new_state, done)

    def update_network_parameters(self, tau=None):
        if tau is None:
            tau = self.tau

        target_value_params = self.target_value.named_parameters()
        value_params = self.value.named_parameters()

        target_value_state_dict = dict(target_value_params)
        value_state_dict = dict(value_params)

        for name in value_state_dict:
            value_state_dict[name] = tau * value_state_dict[name].clone() + (
                1 - tau) * target_value_state_dict[name].clone()
        self.target_value.load_state_dict(value_state_dict)

    def save_models(self):
        print("saving models:")
        self.actor.save_checkpoint()
        self.value.save_checkpoint()
        self.target_value.save_checkpoint()
        self.critic1.save_checkpoint()
        self.critic2.save_checkpoint()

    def load_models(self):
        print("loading models:")
        self.actor.load_checkpoint()
        self.value.load_checkpoint()
        self.target_value.load_checkpoint()
        self.critic1.load_checkpoint()
        self.critic2.load_checkpoint()

    def learn(self):
        # skip learning until the replay buffer holds at least one full batch
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, new_state, done = self.memory.sample_buffer(
            self.batch_size)

        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)
        done = T.tensor(done).to(self.actor.device)
        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)
        state = T.tensor(state, dtype=T.float).to(self.actor.device)
        action = T.tensor(action, dtype=T.float).to(self.actor.device)

        value = self.value(state).view(-1)
        value_ = self.target_value(state_).view(-1)
        value_[done] = 0.0

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=False)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        self.value.optimizer.zero_grad()
        value_target = critic_value - log_probs
        value_loss = .5 * F.mse_loss(value, value_target)
        value_loss.backward(retain_graph=True)
        self.value.optimizer.step()

        actions, log_probs = self.actor.sample_normal(state,
                                                      reparameterize=True)
        log_probs = log_probs.view(-1)
        q1_new_policy = self.critic1.forward(state, actions)
        q2_new_policy = self.critic2.forward(state, actions)
        critic_value = T.min(q1_new_policy, q2_new_policy)
        critic_value = critic_value.view(-1)

        actor_loss = log_probs - critic_value
        actor_loss = T.mean(actor_loss)
        self.actor.optimizer.zero_grad()
        actor_loss.backward(retain_graph=True)
        self.actor.optimizer.step()

        self.critic1.optimizer.zero_grad()
        self.critic2.optimizer.zero_grad()
        q_hat = self.scale * reward + self.gamma * value_
        q1_old_policy = self.critic1.forward(state, action).view(-1)
        q2_old_policy = self.critic2.forward(state, action).view(-1)
        critic_1_loss = .5 * F.mse_loss(q1_old_policy, q_hat)
        critic_2_loss = .5 * F.mse_loss(q2_old_policy, q_hat)

        critic_loss = critic_1_loss + critic_2_loss
        critic_loss.backward()
        self.critic1.optimizer.step()
        self.critic2.optimizer.step()

        self.update_network_parameters()