Example #1
import random

import numpy as np

# Hyperparameters (BUFFER_SIZE, BATCH_SIZE, GAMMA, TAU, UPDATE_EVERY, UPDATE_FREQUENCY)
# and the QNetwork / ReplayBuffer classes are defined elsewhere in the source module.
class Agent():
    def __init__(self, state_size, action_size, seed):

        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed)
        self.qnetwork_target = QNetwork(state_size, action_size, seed)
        self.qnetwork_local.load_model("./dqn_LL_model data.pickle")
        self.qnetwork_target.load_model("./dqn_LL_model data.pickle")
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
        self.loss = 0
        self.loss_list = []

    def step(self, state, action, reward, next_state, done, t_step):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = t_step
        if self.t_step % UPDATE_EVERY == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > 100 * BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.
    
        """

        action_values = self.qnetwork_local.forward(state)

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values)
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
            
        """
        states, actions, rewards, next_states, dones = experiences

        for time in range(BATCH_SIZE):
            # compute the target Q value from the target network, feeding in next_state
            Q_target_av = np.max(
                self.qnetwork_target.forward(next_states[time]))
            Q_target = rewards[time] + gamma * Q_target_av * (
                1 - dones[time])  # if done, the bootstrap term is dropped
            # compute the expected Q values from the local network
            Q_expected = self.qnetwork_local.forward(
                states[time]
            )  # Q values for every action of this state; the taken action is indexed below

            self.qnetwork_local.backward(Q_target, "MSE", actions[time])
            self.loss_list.append((Q_target - Q_expected[actions[time]])**2)
        self.loss = np.mean(self.loss_list)
        self.qnetwork_local.step()
        self.loss_list.clear()

        #  update target network #
        if self.t_step % UPDATE_FREQUENCY == 0:
            self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        """
        target_model.soft_update(local_model, tau)
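
# A minimal sketch (an assumption, since QNetwork is not shown in this example) of the
# blending that QNetwork.soft_update is expected to perform on each weight array:
# θ_target = τ*θ_local + (1 - τ)*θ_target.
def soft_update_arrays(local_weights, target_weights, tau):
    """Illustrative soft update over lists of NumPy weight arrays."""
    return [tau * w_local + (1.0 - tau) * w_target
            for w_local, w_target in zip(local_weights, target_weights)]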
Example #2
import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Hyperparameters (BUFFER_SIZE, BATCH_SIZE, GAMMA, TAU, LR, UPDATE_NN_EVERY,
# UPDATE_MEM_EVERY, UPDATE_MEM_PAR_EVERY, EXPERIENCES_PER_SAMPLING), the torch device,
# and the QNetwork / ReplayBuffer classes are defined elsewhere in the source module.
class Agent():
    """Interacts with and learns from the environment."""
    def __init__(self, state_size, action_size, seed, compute_weights=False):
        """Initialize an Agent object.
        
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        self.compute_weights = compute_weights

        # Algorithms to enable during training
        self.PrioritizedReplayBuffer = True  # Use False to enable uniform sampling
        self.HardTargetUpdate = True  # Use False to enable soft target update

        # Build the policy and target Q-networks for the agent; the target network
        # is kept frozen between updates to avoid training instability.
        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size,
                                       seed).to(device)  # main policy network
        self.qnetwork_target = QNetwork(state_size, action_size,
                                        seed).to(device)  # target network
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        self.criterion = nn.MSELoss()

        # Replay memory
        # Build the experience replay memory, which is used to reduce training instability
        # Below: PER
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE,
                                   EXPERIENCES_PER_SAMPLING, seed,
                                   compute_weights)

        # Below: Uniform by method defined in this script
        #self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)

        # Initialize time step (for updating every UPDATE_NN_EVERY steps)
        self.t_step_nn = 0
        # Initialize time step (for updating every UPDATE_MEM_PAR_EVERY steps)
        self.t_step_mem_par = 0
        # Initialize time step (for updating every UPDATE_MEM_EVERY steps)
        self.t_step_mem = 0

    def step(self, state, action, reward, next_state, done):
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_NN_EVERY time steps.
        self.t_step_nn = (self.t_step_nn + 1) % UPDATE_NN_EVERY
        self.t_step_mem = (self.t_step_mem + 1) % UPDATE_MEM_EVERY
        self.t_step_mem_par = (self.t_step_mem_par + 1) % UPDATE_MEM_PAR_EVERY
        if self.t_step_mem_par == 0:
            self.memory.update_parameters()
        if self.t_step_nn == 0:
            # If enough samples are available in memory, get random subset and learn
            if self.memory.experience_count > EXPERIENCES_PER_SAMPLING:
                sampling = self.memory.sample()
                self.learn(sampling, GAMMA)
        if self.t_step_mem == 0:
            self.memory.update_memory_sampling()

    def act(self, state, eps=0.):
        """A function to select an action based on the Epsilon greedy policy. Epislon percent of times the agent will select a random
        action while 1-Epsilon percent of the time the agent will select the action with the highest Q value as predicted by the
        neural network.
        
        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)

        # Calculate action values (Q values) without tracking gradients
        self.qnetwork_local.eval()  # eval() disables dropout, batch-norm updates, etc.
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()  # switch the network back to training mode

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, sampling, gamma):
        """Update value parameters using given batch of experience tuples.
        Function for training the neural network; it updates the weights of the network.

        Params
        ======
            sampling (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples 
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones, weights, indices = sampling

        # Maximum Q values for the next states from the target network
        q_target = self.qnetwork_target(next_states).detach().max(
            1)[0].unsqueeze(1)
        # TD targets: reward plus discounted max next-state value (zero when the episode ended)
        expected_values = rewards + gamma * q_target * (1 - dones)
        # Predicted Q values from the local network for the actions actually taken
        output = self.qnetwork_local(states).gather(1, actions)
        # Compute the loss (mean squared error between prediction and TD target)
        loss = F.mse_loss(output, expected_values)
        if self.compute_weights:
            with torch.no_grad():
                weight = sum(np.multiply(weights, loss.data.cpu().numpy()))
            loss *= weight
        # Minimize the loss with the optimizer
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

        # ------------------- update priorities ------------------- #
        delta = abs(expected_values - output.detach()).cpu().numpy()
        #print("delta", delta)
        self.memory.update_priorities(delta, indices)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter 
        """
        for target_param, local_param in zip(target_model.parameters(),
                                             local_model.parameters()):
            target_param.data.copy_(tau * local_param.data +
                                    (1.0 - tau) * target_param.data)

    # def hard_update(self):
    # """ This hard_update method copies the local network weights directly
    # into the target network in a single step """

    # Write the algorithm here
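
    # A minimal sketch of one possible hard update (an assumption, not the original
    # author's solution; the exercise above is left for the reader): copy the local
    # network weights directly into the target network.
    def hard_update_example(self):
        """Illustrative hard update: θ_target ← θ_local (direct copy)."""
        self.qnetwork_target.load_state_dict(self.qnetwork_local.state_dict())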

    def load_models(self, policy_net_filename, target_net_filename):
        """ Function to load the parameters of the policy and target models """
        print('Loading model...')
        self.qnetwork_local.load_model(policy_net_filename)
        self.qnetwork_target.load_model(target_net_filename)
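

# A minimal usage sketch (not part of the original repository): it drives an Agent with
# Example #2's step() signature through a classic OpenAI-Gym-style environment. The
# `env` object, episode count, state/action sizes, and epsilon schedule are illustrative
# assumptions.
def train_sketch(env, n_episodes=500, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
    agent = Agent(state_size=8, action_size=4, seed=0)
    eps = eps_start
    for _ in range(n_episodes):
        state = env.reset()
        done = False
        while not done:
            action = agent.act(state, eps)  # epsilon-greedy action selection
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)  # store and (maybe) learn
            state = next_state
        eps = max(eps_end, eps_decay * eps)  # decay the exploration rate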