Code example #1
    def __init__(self, num_states, num_actions, action_high, action_low, gamma=GAMMA, rho=RHO,
                 std_dev=STD_DEV):
        # initialize everything
        self.actor_network = ActorNetwork(num_states, num_actions, action_high)
        self.critic_network = CriticNetwork(num_states, num_actions, action_high)
        self.actor_target = ActorNetwork(num_states, num_actions, action_high)
        self.critic_target = CriticNetwork(num_states, num_actions, action_high)

        # Making the weights equal initially
        self.actor_target.set_weights(self.actor_network.get_weights())
        self.critic_target.set_weights(self.critic_network.get_weights())

        self.buffer = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
        self.gamma = tf.constant(gamma)
        self.rho = rho
        self.action_high = action_high
        self.action_low = action_low
        self.num_states = num_states
        self.num_actions = num_actions
        self.noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))

        # optimizers
        self.critic_optimizer = tf.keras.optimizers.Adam(CRITIC_LR, amsgrad=True)
        self.actor_optimizer = tf.keras.optimizers.Adam(ACTOR_LR, amsgrad=True)

        # temporary variable for side effects
        self.cur_action = None

        # define update weights with tf.function for improved performance
        @tf.function(
            input_signature=[
                tf.TensorSpec(shape=(None, num_states), dtype=tf.float32),
                tf.TensorSpec(shape=(None, num_actions), dtype=tf.float32),
                tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
                tf.TensorSpec(shape=(None, num_states), dtype=tf.float32),
                tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
            ])
        def update_weights(s, a, r, sn, d):
            """
            Perform one gradient update of the critic and actor networks.
            """
            with tf.GradientTape() as tape:
                # Bellman target: y = r + gamma * (1 - d) * Q_target(s', mu_target(s'))
                y = r + self.gamma * (1 - d) * self.critic_target([sn, self.actor_target(sn)])
                # critic loss: mean absolute TD error |y - Q(s, a)|
                critic_loss = tf.math.reduce_mean(tf.math.abs(y - self.critic_network([s, a])))
            critic_grad = tape.gradient(critic_loss, self.critic_network.trainable_variables)
            self.critic_optimizer.apply_gradients(
                zip(critic_grad, self.critic_network.trainable_variables))

            with tf.GradientTape() as tape:
                # actor loss: negative mean Q(s, mu(s)), so minimizing it maximizes Q
                actor_loss = -tf.math.reduce_mean(self.critic_network([s, self.actor_network(s)]))
            actor_grad = tape.gradient(actor_loss, self.actor_network.trainable_variables)
            self.actor_optimizer.apply_gradients(
                zip(actor_grad, self.actor_network.trainable_variables))
            return critic_loss, actor_loss

        self.update_weights = update_weights
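
Note: the OUActionNoise class constructed above is not defined in this snippet. Below is a minimal sketch of an Ornstein-Uhlenbeck noise process with a matching constructor; the theta and dt defaults are assumptions, following the widely used Keras DDPG tutorial implementation.

import numpy as np

class OUActionNoise:
    """Temporally correlated exploration noise (sketch; defaults are assumptions)."""
    def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
        self.mean = mean
        self.std_dev = std_deviation
        self.theta = theta
        self.dt = dt
        self.x_initial = x_initial
        self.reset()

    def __call__(self):
        # dx = theta * (mean - x) * dt + std_dev * sqrt(dt) * N(0, 1)
        x = (self.x_prev
             + self.theta * (self.mean - self.x_prev) * self.dt
             + self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape))
        self.x_prev = x
        return x

    def reset(self):
        # restart the process at its initial value (or zero)
        self.x_prev = self.x_initial if self.x_initial is not None else np.zeros_like(self.mean)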
Code example #2
    def __init__(self, input_size, output_size, hidden = 256, lr_actor=1.0e-3, lr_critic=1.0e-3, agent_number=0, tau=1.0e-2,
                 gamma=0.99, epsilon=1.0, epsilon_decay=0.99, weight_decay=0, clipgrad=.1, seed = 42):
        super(Agent, self).__init__()

        self.seed = seed
        self.actor         = ActorNetwork(input_size, output_size, name=f"Actor_Agent{agent_number}").to(device)
        self.critic        = CriticNetwork(input_size, output_size, name=f"Critic_Agent{agent_number}").to(device)
        self.target_actor  = ActorNetwork(input_size, output_size, name=f"Actor_Target_Agent{agent_number}").to(device)
        self.target_critic = CriticNetwork(input_size, output_size, name=f"Critic_Target_Agent{agent_number}").to(device)

        self.noise = OUActionNoise(mu=np.zeros(output_size))
        self.tau = tau
        self.epsilon = epsilon
        self.epsilon_decay=epsilon_decay
        self.gamma = gamma
        self.clipgrad = clipgrad

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
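
Note: the PyTorch-based examples construct OUActionNoise(mu=...) and call sample() and reset() on it, but the class itself is not shown. Below is a NumPy sketch compatible with that usage; the theta and sigma defaults are assumptions.

import numpy as np

class OUActionNoise:
    """Ornstein-Uhlenbeck process exposing sample()/reset() (sketch; defaults assumed)."""
    def __init__(self, mu, theta=0.15, sigma=0.2, seed=None):
        self.mu = np.asarray(mu, dtype=np.float64)
        self.theta = theta
        self.sigma = sigma
        self.rng = np.random.default_rng(seed)
        self.reset()

    def reset(self):
        # restart the process at its long-running mean
        self.state = self.mu.copy()

    def sample(self):
        # mean-reverting step plus Gaussian perturbation
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * self.rng.standard_normal(self.mu.shape)
        self.state = x + dx
        return self.state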
Code example #3
File: DDPG.py, Project: AverageHomosapien/SAC-Course
    def __init__(self,
                 env_id,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 n_actions,
                 gamma=0.99,
                 max_size=1000000,
                 fc1_dims=256,
                 fc2_dims=256,
                 batch_size=256):
        print("agent fc1 dims {}, fc2 dims {}".format(fc1_dims, fc2_dims))
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.alpha = alpha
        self.beta = beta

        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.noise = OUActionNoise(mu=np.zeros(n_actions))
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  fc1_dims,
                                  fc2_dims,
                                  n_actions=n_actions,
                                  name=env_id + '_actor')
        self.critic = CriticNetwork(beta,
                                    input_dims,
                                    fc1_dims,
                                    fc2_dims,
                                    n_actions=n_actions,
                                    name=env_id + '_critic')
        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         fc1_dims,
                                         fc2_dims,
                                         n_actions=n_actions,
                                         name=env_id + '_target_actor')
        self.target_critic = CriticNetwork(beta,
                                           input_dims,
                                           fc1_dims,
                                           fc2_dims,
                                           n_actions=n_actions,
                                           name=env_id + '_target_critic')
        self.update_network_parameters(tau=1)
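
Note: update_network_parameters() is not included in this snippet. In this style of agent, calling it with tau=1 performs the initial hard copy into the target networks, and later calls with the stored tau perform soft (Polyak) updates. A plausible sketch, assuming the networks are torch.nn.Module instances:

    def update_network_parameters(self, tau=None):
        # tau=1 -> hard copy (initial sync); tau=None -> soft update with self.tau
        if tau is None:
            tau = self.tau
        for target, online in ((self.target_actor, self.actor),
                               (self.target_critic, self.critic)):
            for t_param, param in zip(target.parameters(), online.parameters()):
                t_param.data.copy_(tau * param.data + (1.0 - tau) * t_param.data)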
Code example #4
def test(env_info, total_episodes=3, noise_std=0.2):
    ou_noise = OUActionNoise(mean=np.zeros(1),
                             std_deviation=float(noise_std) * np.ones(1))

    for _ in range(total_episodes):

        prev_state = env.reset()

        while True:
            env.render()

            tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
            action = policy(actor_model, tf_prev_state, ou_noise, **env_info)
            state, reward, done, info = env.step(action)

            if done:
                break
            prev_state = state
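
Note: policy() and the keys of env_info are not defined in this snippet. Below is a sketch in the style of the Keras DDPG tutorial, assuming env_info provides lower_bound and upper_bound for the action space (any extra keys are ignored):

import numpy as np
import tensorflow as tf

def policy(actor_model, state, noise_object, lower_bound, upper_bound, **unused_env_info):
    """Greedy action from the actor plus exploration noise, clipped to the action range."""
    sampled_actions = tf.squeeze(actor_model(state)).numpy()
    sampled_actions = sampled_actions + noise_object()
    legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
    return [np.squeeze(legal_action)]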
Code example #5
    def __init__(self,
                 alpha,
                 beta,
                 input_dims,
                 tau,
                 n_actions,
                 gamma=0.99,
                 max_size=1000000,
                 fc1_dims=400,
                 fc2_dims=300,
                 batch_size=64):
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.alpha = alpha
        self.beta = beta

        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.noise = OUActionNoise(mu=np.zeros(n_actions))
        self.actor = ActorNetwork(alpha,
                                  input_dims,
                                  fc1_dims,
                                  fc2_dims,
                                  n_actions=n_actions,
                                  name='actor')
        self.critic = CriticNetwork(beta,
                                    input_dims,
                                    fc1_dims,
                                    fc2_dims,
                                    n_actions=n_actions,
                                    name='critic')
        self.target_actor = ActorNetwork(alpha,
                                         input_dims,
                                         fc1_dims,
                                         fc2_dims,
                                         n_actions=n_actions,
                                         name='target_actor')
        self.target_critic = CriticNetwork(beta,
                                           input_dims,
                                           fc1_dims,
                                           fc2_dims,
                                           n_actions=n_actions,
                                           name='target_critic')
        self.update_network_parameters(tau=1)
Code example #6
def train(env_info, buffer, total_episodes=100, noise_std=0.2, gamma=0.99, tau=0.005):
    ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(noise_std) * np.ones(1))

    # Making the weights equal initially
    target_actor.set_weights(actor_model.get_weights())
    target_critic.set_weights(critic_model.get_weights())

    # To store reward history of each episode
    ep_reward_list = []

    for ep in tqdm(range(total_episodes)):

        prev_state = env.reset()
        episodic_reward = 0

        while True:
            tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)

            action = policy(actor_model, tf_prev_state, ou_noise, **env_info)
            # Receive next state and reward from the environment.
            state, reward, done, info = env.step(action)

            buffer.record((prev_state, action, reward, state))
            episodic_reward += reward

            buffer.learn(gamma)
            update_target(target_actor.variables, actor_model.variables, tau)
            update_target(target_critic.variables, critic_model.variables, tau)

            if reward >= 40:
                print(f"-- Episode: {ep} "+"-"*20)
                print(f"Action: {action}")
                print(f"Reward: {reward}\tDone: {done}\nState: {state}")

            # End this episode when `done` is True
            if done:
                break

            prev_state = state

        ep_reward_list.append(episodic_reward)
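
Note: update_target() is the soft-update helper used above but not defined here. A minimal sketch (Polyak averaging over the variable lists, as in the Keras DDPG tutorial):

import tensorflow as tf

@tf.function
def update_target(target_weights, weights, tau):
    # target <- tau * online + (1 - tau) * target, applied variable by variable
    for (target_var, online_var) in zip(target_weights, weights):
        target_var.assign(online_var * tau + target_var * (1 - tau))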
Code example #7
    def __init__(self,
                 state_size,
                 action_size,
                 lr_actor=LR_ACTOR,
                 lr_critic=LR_CRITIC,
                 random_seed=42,
                 num_agents=1):
        """Initialize Agent object.
        
        Params
        ====
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            lr_actor (float): Learning rate for actor model
            lr_critic (float): Learning Rate for critic model
            random_seed (int): Random seed
            num_agents (int): Number of agents
            
        return 
        ====
            None
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.num_agents = num_agents

        # Initialize time step (for updating every hyperparameters["update_every"] steps)
        self.t_step = 0

        # Actor network
        self.actor = ActorNetwork(lr_actor,
                                  state_size,
                                  action_size,
                                  random_seed,
                                  name="actor")
        self.actor_target = ActorNetwork(lr_actor,
                                         state_size,
                                         action_size,
                                         random_seed,
                                         name="actor_target")

        self.soft_update(self.actor, self.actor_target, tau=1)

        # Critic network
        self.critic = CriticNetwork(lr_critic,
                                    state_size,
                                    action_size,
                                    random_seed,
                                    name="critic")
        self.critic_target = CriticNetwork(lr_critic,
                                           state_size,
                                           action_size,
                                           random_seed,
                                           name="critic_target")

        self.soft_update(self.critic, self.critic_target, tau=1)

        # Noise process
        self.noise = OUActionNoise(mu=np.zeros(action_size))

        # Replay buffer memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE,
                                   random_seed)
Code example #8
class Agent():
    """Interacts with and learns from the environment."""
    def __init__(self,
                 state_size,
                 action_size,
                 lr_actor=LR_ACTOR,
                 lr_critic=LR_CRITIC,
                 random_seed=42,
                 num_agents=1):
        """Initialize Agent object.
        
        Params
        ====
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            lr_actor (float): Learning rate for actor model
            lr_critic (float): Learning Rate for critic model
            random_seed (int): Random seed
            num_agents (int): Number of agents
            
        return 
        ====
            None
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.num_agents = num_agents

        # Initialize time step (for updating every hyperparameters["update_every"] steps)
        self.t_step = 0

        # Actor network
        self.actor = ActorNetwork(lr_actor,
                                  state_size,
                                  action_size,
                                  random_seed,
                                  name="actor")
        self.actor_target = ActorNetwork(lr_actor,
                                         state_size,
                                         action_size,
                                         random_seed,
                                         name="actor_target")

        self.soft_update(self.actor, self.actor_target, tau=1)

        # Critic network
        self.critic = CriticNetwork(lr_critic,
                                    state_size,
                                    action_size,
                                    random_seed,
                                    name="critic")
        self.critic_target = CriticNetwork(lr_critic,
                                           state_size,
                                           action_size,
                                           random_seed,
                                           name="critic_target")

        self.soft_update(self.critic, self.critic_target, tau=1)

        # Noise process
        self.noise = OUActionNoise(mu=np.zeros(action_size))

        # Replay buffer memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE,
                                   random_seed)

    def step(self, states, actions, rewards, next_states, dones):
        """Save experience in replay memory, and use random sample from buffer to learn."""
        # Save experience / reward
        # Support for multi agents learners
        for state, action, reward, next_state, done in zip(
                states, actions, rewards, next_states, dones):
            self.memory.add(state, action, reward, next_state, done)
        # Update timestep to learn
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        # Learn, if enough samples are available in memory
        if len(self.memory) > BATCH_SIZE and self.t_step == 0:
            experiences = self.memory.sample()
            self.learn(experiences, GAMMA)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        states = T.from_numpy(state).float().to(device)
        self.actor.eval()
        with T.no_grad():
            actions = self.actor(states).cpu().data.numpy()
        self.actor.train()

        if add_noise:
            actions += self.noise.sample()
        return np.clip(actions, -1, 1)

    def reset(self):
        self.noise.reset()

    def learn(self, experiences, gamma):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic.optimizer.zero_grad()
        critic_loss.backward()
        T.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic.optimizer.step()

        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        actions_pred = self.actor(states)
        actor_loss = -self.critic(states, actions_pred).mean()
        # Minimize the loss
        self.actor.optimizer.zero_grad()
        actor_loss.backward()
        self.actor.optimizer.step()

        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic, self.critic_target, TAU)
        self.soft_update(self.actor, self.actor_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter 
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)

    def save_models(self):
        """ Save models weights """
        self.actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.actor_target.save_checkpoint()
        self.critic_target.save_checkpoint()

    def load_models(self):
        """ Load models weights """
        self.actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.actor_target.load_checkpoint()
        self.critic_target.load_checkpoint()
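
Note: the ReplayBuffer constructed as ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed) is not shown in this example. Below is a sketch matching the add / sample / len usage in step() and learn(); the field handling and tensor conversion are assumptions, and moving the sampled tensors to the training device is omitted.

import random
from collections import deque, namedtuple

import numpy as np
import torch as T

class ReplayBuffer:
    """Fixed-size experience buffer (sketch matching the usage above)."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience",
                                     ["state", "action", "reward", "next_state", "done"])
        random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        # uniform random minibatch, stacked into float tensors
        experiences = random.sample(self.memory, k=self.batch_size)
        states = T.from_numpy(np.vstack([e.state for e in experiences])).float()
        actions = T.from_numpy(np.vstack([e.action for e in experiences])).float()
        rewards = T.from_numpy(np.vstack([e.reward for e in experiences])).float()
        next_states = T.from_numpy(np.vstack([e.next_state for e in experiences])).float()
        dones = T.from_numpy(np.vstack([e.done for e in experiences]).astype(np.uint8)).float()
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.memory)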
Code example #9
class Agent:
    def __init__(self, input_size, output_size, hidden = 256, lr_actor=1.0e-3, lr_critic=1.0e-3, agent_number=0, tau=1.0e-2,
                 gamma=0.99, epsilon=1.0, epsilon_decay=0.99, weight_decay=0, clipgrad=.1, seed = 42):
        super(Agent, self).__init__()
        
        self.seed = seed
        self.actor         = ActorNetwork(input_size, output_size, name=f"Actor_Agent{agent_number}").to(device)
        self.critic        = CriticNetwork(input_size, output_size, name=f"Critic_Agent{agent_number}").to(device)
        self.target_actor  = ActorNetwork(input_size, output_size, name=f"Actor_Target_Agent{agent_number}").to(device)
        self.target_critic = CriticNetwork(input_size, output_size, name=f"Critic_Target_Agent{agent_number}").to(device)

        self.noise = OUActionNoise(mu=np.zeros(output_size))
        self.tau = tau
        self.epsilon = epsilon
        self.epsilon_decay=epsilon_decay
        self.gamma = gamma
        self.clipgrad = clipgrad
        
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
       

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.actor.eval()
        with torch.no_grad():
            action = self.actor(state).cpu().squeeze(0).data.numpy()

        self.actor.train()
        if add_noise:
            action += self.noise.sample() * self.epsilon
        return np.clip(action, -1, 1)
    
    
    def learn(self, experiences):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.target_actor(next_states.to(device))
        Q_targets_next = self.target_critic(next_states.to(device), actions_next.to(device))
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic(states, actions)
        critic_loss = f.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        clip_grad_norm_(self.critic.parameters(), self.clipgrad)
        self.critic_optimizer.step()

        #    update actor
        # Compute actor loss
        actions_pred = self.actor(states)
        actor_loss = -self.critic(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        #clip_grad_norm_(self.actor.parameters(), self.clipgrad)
        self.actor_optimizer.step()

        #    update target networks
        self.soft_update(self.critic, self.target_critic)
        self.soft_update(self.actor, self.target_actor)
        
        #    update epsilon and noise
        self.epsilon *= self.epsilon_decay
        self.noise.reset()
    


    def reset(self):
        self.noise.reset()
    
    def soft_update(self, local_model, target_model):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter 
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(self.tau*local_param.data + (1.0-self.tau)*target_param.data)
            
    def save_models(self):
        """ Save models weights """
        self.actor.save_checkpoint()
        self.critic.save_checkpoint()
        self.target_actor.save_checkpoint()
        self.target_critic.save_checkpoint()
        
    def load_models(self):
        """ Load models weights """
        self.actor.load_checkpoint()
        self.critic.load_checkpoint()
        self.target_actor.load_checkpoint()
        self.target_critic.load_checkpoint()
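
Note: ActorNetwork and CriticNetwork, including the save_checkpoint()/load_checkpoint() methods called above, are not shown in these snippets. Below is a compact PyTorch sketch consistent with the constructor calls in code example #9; the hidden sizes, activations, and checkpoint paths are assumptions.

import os

import torch
import torch.nn as nn
import torch.nn.functional as F

class ActorNetwork(nn.Module):
    """Deterministic policy mu(s) with tanh-bounded output (sketch)."""
    def __init__(self, input_size, output_size, hidden=256, name="actor", ckpt_dir="checkpoints"):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, output_size)
        self.checkpoint_file = os.path.join(ckpt_dir, name + ".pt")

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # tanh keeps actions in [-1, 1], matching np.clip(action, -1, 1) in act()
        return torch.tanh(self.out(x))

    def save_checkpoint(self):
        os.makedirs(os.path.dirname(self.checkpoint_file), exist_ok=True)
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        self.load_state_dict(torch.load(self.checkpoint_file))

class CriticNetwork(nn.Module):
    """Action-value network Q(s, a) with the same checkpoint helpers (sketch)."""
    def __init__(self, input_size, output_size, hidden=256, name="critic", ckpt_dir="checkpoints"):
        super().__init__()
        self.fc1 = nn.Linear(input_size + output_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, 1)
        self.checkpoint_file = os.path.join(ckpt_dir, name + ".pt")

    def forward(self, state, action):
        # state-action value estimate; state and action are concatenated at the input
        x = F.relu(self.fc1(torch.cat([state, action], dim=1)))
        x = F.relu(self.fc2(x))
        return self.out(x)

    def save_checkpoint(self):
        os.makedirs(os.path.dirname(self.checkpoint_file), exist_ok=True)
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        self.load_state_dict(torch.load(self.checkpoint_file))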