Example no. 1
    def __init__(self, epsilon, min_epsilon, decay_rate, learning_rate, tau, gamma, batch_size,
                 q_network, target_network, max_memory_length, agent_index=None):
        self.experience_memory = deque(maxlen=max_memory_length)
        self.prioritized_memory = PrioritizedReplayMemory(max_length=max_memory_length, alpha=0.6,
                                                          beta=0.4, beta_annealing_steps=500000)
        self.last_observation = None
        self.last_action = None
        self.agent_index = agent_index
        self.q_network = q_network
        self.target_network = target_network
        # epsilon is the probability of taking a random action
        self.epsilon = epsilon
        # lowest value epsilon is allowed to reach during training
        self.min_epsilon = min_epsilon
        # rate at which epsilon decays each episode
        self.decay_rate = decay_rate
        self.learning_rate = learning_rate
        # gamma is the discount factor
        self.gamma = gamma
        # tau is the weighting of the regular Q-network parameters when soft-updating the target
        # network. Tau = 1.0 is effectively no longer double DQN, since it just copies everything
        # as-is from the regular Q-network into the target network.
        self.tau = tau
        self.batch_size = batch_size
        self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self.learning_rate)
        self.criterion = nn.MSELoss()
        # Huber loss reduces sensitivity to outliers
        # self.criterion = nn.SmoothL1Loss()
        self.loss_history = []
        self.total_training_episodes = 0
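
The PrioritizedReplayMemory class itself is not part of this listing. The sketch below is a hypothetical, minimal proportional prioritized replay buffer (in the style of Schaul et al.'s prioritized experience replay), written to match the constructor arguments used here and the sample / update_priorities / anneal_beta calls made in Example no. 5; the transition layout and every internal detail are assumptions, not the original implementation.

# Hypothetical sketch, not the original class: proportional prioritization with
# importance-sampling weights and linear beta annealing.
from collections import namedtuple

import numpy as np
import torch

Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])


class PrioritizedReplayMemory:
    def __init__(self, max_length, alpha=0.6, beta=0.4, beta_annealing_steps=500000):
        self.max_length = max_length
        self.alpha = alpha                      # 0 = uniform sampling, 1 = fully prioritized
        self.beta = beta                        # importance-sampling correction, annealed toward 1
        self.beta_start = beta
        self.beta_annealing_steps = beta_annealing_steps
        self.memory = []
        self.priorities = []
        self.max_priority = 1.0

    def add(self, transition):
        # New transitions get the current maximum priority so they are sampled at least once.
        if len(self.memory) >= self.max_length:
            self.memory.pop(0)
            self.priorities.pop(0)
        self.memory.append(transition)
        self.priorities.append(self.max_priority)

    def sample(self, batch_size):
        probs = np.asarray(self.priorities, dtype=np.float64) ** self.alpha
        probs /= probs.sum()
        indices = np.random.choice(len(self.memory), batch_size, p=probs)
        weights = (len(self.memory) * probs[indices]) ** (-self.beta)
        weights /= weights.max()                # normalize so the largest weight is 1
        batch = [self.memory[i] for i in indices]
        states = torch.tensor([t.state for t in batch], dtype=torch.float32)
        actions = torch.tensor([t.action for t in batch], dtype=torch.long)
        rewards = torch.tensor([[t.reward] for t in batch], dtype=torch.float32)
        next_states = torch.tensor([t.next_state for t in batch], dtype=torch.float32)
        dones = torch.tensor([[t.done] for t in batch], dtype=torch.int8)
        is_weights = torch.tensor(weights, dtype=torch.float32)
        return states, actions, rewards, next_states, dones, is_weights, indices

    def update_priorities(self, indices, new_priorities):
        for i, p in zip(indices, new_priorities):
            self.priorities[i] = float(p)
            self.max_priority = max(self.max_priority, float(p))

    def anneal_beta(self, frame):
        # Linearly anneal beta from its starting value toward 1 over beta_annealing_steps frames.
        fraction = min(frame / float(self.beta_annealing_steps), 1.0)
        self.beta = self.beta_start + fraction * (1.0 - self.beta_start)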
Example no. 2
    def __init__(self,
                 n_states,
                 n_actions,
                 opt,
                 ouprocess=True,
                 mean_var_path=None,
                 supervised=False):
        """ DDPG Algorithms
        Args:
            n_states: int, dimension of states
            n_actions: int, dimension of actions
            opt: dict, params
            supervised, bool, pre-train the actor with supervised learning
        """
        self.n_states = n_states
        self.n_actions = n_actions

        # Params
        self.alr = opt['alr']
        self.clr = opt['clr']
        self.model_name = opt['model']
        self.batch_size = opt['batch_size']
        self.gamma = opt['gamma']
        self.tau = opt['tau']
        self.ouprocess = ouprocess

        if mean_var_path is None or not os.path.exists(mean_var_path):
            mean = np.zeros(n_states)
            var = np.zeros(n_states)
        else:
            with open(mean_var_path, 'rb') as f:
                mean, var = pickle.load(f)

        self.normalizer = Normalizer(mean, var)

        if supervised:
            self._build_actor()
            print("Supervised Learning Initialized")
        else:
            # Build Network
            self._build_network()
            print('Finished Initializing Networks')

        # Create a replay memory object, passing the memory size into it.
        self.replay_memory = PrioritizedReplayMemory(
            capacity=opt['memory_size'])
        # self.replay_memory = ReplayMemory(capacity=opt['memory_size'])
        # self.noise = OUProcess(n_actions)
        print('DDPG Initialized!')
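
The Normalizer referenced above is also not shown in this example. Below is a minimal sketch, assuming it standardizes raw states with the stored mean and variance and hands back a float tensor (degenerating to a mean-shift when the variance is all zeros, as in the default branch above); the real class may differ.

# Hypothetical sketch of the Normalizer assumed by the DDPG code above.
import numpy as np
import torch


class Normalizer(object):
    def __init__(self, mean, var):
        self.mean = np.asarray(mean, dtype=np.float32)
        std = np.sqrt(np.asarray(var, dtype=np.float32))
        std[std == 0.0] = 1.0                 # zero variance (the default branch) becomes a no-op scale
        self.std = std

    def __call__(self, states):
        # states: list of state lists, shape (batch, n_states)
        x = (np.asarray(states, dtype=np.float32) - self.mean) / self.std
        return torch.FloatTensor(x)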
Example no. 3
class DDPG(object):
    def __init__(self,
                 n_states,
                 n_actions,
                 opt,
                 ouprocess=True,
                 mean_var_path=None,
                 supervised=False):
        """ DDPG Algorithms
        Args:
            n_states: int, dimension of states
            n_actions: int, dimension of actions
            opt: dict, params
            supervised, bool, pre-train the actor with supervised learning
        """
        self.n_states = n_states
        self.n_actions = n_actions

        # Params
        self.alr = opt['alr']
        self.clr = opt['clr']
        self.model_name = opt['model']
        self.batch_size = opt['batch_size']
        self.gamma = opt['gamma']
        self.tau = opt['tau']
        self.ouprocess = ouprocess

        if mean_var_path is None or not os.path.exists(mean_var_path):
            mean = np.zeros(n_states)
            var = np.zeros(n_states)
        else:
            with open(mean_var_path, 'rb') as f:
                mean, var = pickle.load(f)

        self.normalizer = Normalizer(mean, var)

        if supervised:
            self._build_actor()
            print("Supervised Learning Initialized")
        else:
            # Build Network
            self._build_network()
            print('Finished Initializing Networks')

        self.replay_memory = PrioritizedReplayMemory(
            capacity=opt['memory_size'])
        # self.replay_memory = ReplayMemory(capacity=opt['memory_size'])
        self.noise = OUProcess(n_actions)
        print('DDPG Initialized!')

    @staticmethod
    def totensor(x):
        return Variable(torch.FloatTensor(x))

    def _build_actor(self):
        if self.ouprocess:
            noisy = False
        else:
            noisy = True
        self.actor = Actor(self.n_states, self.n_actions, noisy=noisy)
        self.actor_criterion = nn.MSELoss()
        self.actor_optimizer = optimizer.Adam(lr=self.alr,
                                              params=self.actor.parameters())

    def _build_network(self):
        if self.ouprocess:
            noisy = False
        else:
            noisy = True
        self.actor = Actor(self.n_states, self.n_actions, noisy=noisy)
        self.target_actor = Actor(self.n_states, self.n_actions)
        self.critic = Critic(self.n_states, self.n_actions)
        self.target_critic = Critic(self.n_states, self.n_actions)

        # if model params are provided, load them
        if len(self.model_name):
            self.load_model(model_name=self.model_name)
            print("Loading model from file: {}".format(self.model_name))

        # Copy actor's parameters
        self._update_target(self.target_actor, self.actor, tau=1.0)

        # Copy critic's parameters
        self._update_target(self.target_critic, self.critic, tau=1.0)

        self.loss_criterion = nn.MSELoss()
        self.actor_optimizer = optimizer.Adam(lr=self.alr,
                                              params=self.actor.parameters(),
                                              weight_decay=1e-5)
        self.critic_optimizer = optimizer.Adam(lr=self.clr,
                                               params=self.critic.parameters(),
                                               weight_decay=1e-5)

    @staticmethod
    def _update_target(target, source, tau):
        for (target_param, param) in zip(target.parameters(),
                                         source.parameters()):
            target_param.data.copy_(target_param.data * (1 - tau) +
                                    param.data * tau)

    def reset(self, sigma):
        self.noise.reset(sigma)

    def _sample_batch(self):
        batch, idx = self.replay_memory.sample(self.batch_size)
        # batch = self.replay_memory.sample(self.batch_size)
        # materialize as lists so they can be indexed and normalized under Python 3 as well
        states = [x[0].tolist() for x in batch]
        next_states = [x[3].tolist() for x in batch]
        actions = [x[1].tolist() for x in batch]
        rewards = [x[2] for x in batch]
        terminates = [x[4] for x in batch]

        return idx, states, next_states, actions, rewards, terminates

    def add_sample(self, state, action, reward, next_state, terminate):
        self.critic.eval()
        self.actor.eval()
        self.target_critic.eval()
        self.target_actor.eval()
        batch_state = self.normalizer([state.tolist()])
        batch_next_state = self.normalizer([next_state.tolist()])
        current_value = self.critic(batch_state,
                                    self.totensor([action.tolist()]))
        target_action = self.target_actor(batch_next_state)
        target_value = self.totensor([reward]) \
            + self.totensor([0 if x else 1 for x in [terminate]]) \
            * self.target_critic(batch_next_state, target_action) * self.gamma
        error = float(torch.abs(current_value - target_value).data.numpy()[0])

        self.target_actor.train()
        self.actor.train()
        self.critic.train()
        self.target_critic.train()
        self.replay_memory.add(error,
                               (state, action, reward, next_state, terminate))

    def update(self):
        """ Update the Actor and Critic with a batch data
        """
        idxs, states, next_states, actions, rewards, terminates = self._sample_batch()
        batch_states = self.normalizer(states)  # totensor(states)
        batch_next_states = self.normalizer(next_states)  # Variable(torch.FloatTensor(next_states))
        batch_actions = self.totensor(actions)
        batch_rewards = self.totensor(rewards)
        mask = [0 if x else 1 for x in terminates]
        mask = self.totensor(mask)

        target_next_actions = self.target_actor(batch_next_states).detach()
        target_next_value = self.target_critic(
            batch_next_states, target_next_actions).detach().squeeze(1)

        current_value = self.critic(batch_states, batch_actions)
        next_value = batch_rewards + mask * target_next_value * self.gamma
        # Update Critic

        # update prioritized memory
        error = torch.abs(current_value - next_value).data.numpy()
        for i in range(self.batch_size):
            idx = idxs[i]
            self.replay_memory.update(idx, error[i][0])

        loss = self.loss_criterion(current_value, next_value)
        self.critic_optimizer.zero_grad()
        loss.backward()
        self.critic_optimizer.step()

        # Update Actor
        self.critic.eval()
        policy_loss = -self.critic(batch_states, self.actor(batch_states))
        policy_loss = policy_loss.mean()
        self.actor_optimizer.zero_grad()
        policy_loss.backward()

        self.actor_optimizer.step()
        self.critic.train()

        self._update_target(self.target_critic, self.critic, tau=self.tau)
        self._update_target(self.target_actor, self.actor, tau=self.tau)

        return loss.data[0], policy_loss.data[0]

    def choose_action(self, x):
        """ Select Action according to the current state
        Args:
            x: np.array, current state
        """
        self.actor.eval()
        act = self.actor(self.normalizer([x.tolist()])).squeeze(0)
        self.actor.train()
        action = act.data.numpy()
        if self.ouprocess:
            action += self.noise.noise()
        return action.clip(0, 1)

    def sample_noise(self):
        self.actor.sample_noise()

    def load_model(self, model_name):
        """ Load Torch Model from files
        Args:
            model_name: str, model path
        """
        self.actor.load_state_dict(
            torch.load('{}_actor.pth'.format(model_name)))
        self.critic.load_state_dict(
            torch.load('{}_critic.pth'.format(model_name)))

    def save_model(self, model_dir, title):
        """ Save Torch Model from files
        Args:
            model_dir: str, model dir
            title: str, model name
        """
        torch.save(self.actor.state_dict(),
                   '{}/{}_actor.pth'.format(model_dir, title))

        torch.save(self.critic.state_dict(),
                   '{}/{}_critic.pth'.format(model_dir, title))

    def save_actor(self, path):
        """ save actor network
        Args:
             path, str, path to save
        """
        torch.save(self.actor.state_dict(), path)

    def load_actor(self, path):
        """ load actor network
        Args:
             path, str, path to load
        """
        self.actor.load_state_dict(torch.load(path))

    def train_actor(self, batch_data, is_train=True):
        """ Train the actor separately with data
        Args:
            batch_data: tuple, (states, actions)
            is_train: bool
        Return:
            _loss: float, training loss
        """
        states, action = batch_data

        if is_train:
            self.actor.train()
            pred = self.actor(self.normalizer(states))
            action = self.totensor(action)

            _loss = self.actor_criterion(pred, action)

            self.actor_optimizer.zero_grad()
            _loss.backward()
            self.actor_optimizer.step()

        else:
            self.actor.eval()
            pred = self.actor(self.normalizer(states))
            action = self.totensor(action)
            _loss = self.actor_criterion(pred, action)

        return _loss.data[0]
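
A hypothetical usage sketch for the DDPG class above. The opt keys are the ones the constructor actually reads; the environment interaction is replaced by random placeholders, and the sketch assumes the same older PyTorch API (Variable, loss.data[0]) that the class itself targets.

import numpy as np

# Hyperparameters the DDPG constructor expects (values here are illustrative).
opt = {
    'alr': 1e-4,          # actor learning rate
    'clr': 1e-3,          # critic learning rate
    'model': '',          # empty string: skip loading a saved model
    'batch_size': 32,
    'gamma': 0.99,
    'tau': 0.005,
    'memory_size': 100000,
}

agent = DDPG(n_states=8, n_actions=2, opt=opt)

state = np.random.rand(8)
for step in range(1000):
    action = agent.choose_action(state)
    # env.step(action) would go here; random placeholders keep the sketch self-contained.
    next_state = np.random.rand(8)
    reward, terminate = float(np.random.rand()), False
    agent.add_sample(state, action, reward, next_state, terminate)
    if step >= opt['batch_size']:
        critic_loss, actor_loss = agent.update()
    state = next_state

agent.save_model(model_dir='.', title='ddpg_demo')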
Example no. 4
num_episodes = 10000

random_action_probability_start = 0.999  # starting random action probability
random_action_probability_end = 0.1  # ending random action probability
random_action_probability_decay = 0.9995

discount_factor = 0.99

replay_memory_size = 500000
replay_memory_initial_size = 50000
batch_size = 32

state_processor = StateProcessor()

replay_memory = PrioritizedReplayMemory(replay_memory_size)

estimator_1 = Estimator(discount_factor, "estimator_1")
estimator_2 = Estimator(discount_factor, "estimator_2")

recent_timesteps = collections.deque(maxlen=100)

global_step = 0

with tf.Session() as sess:
    random_action_probability = random_action_probability_start
    sess.run(tf.global_variables_initializer())

    # initialize replay memory
    print("Initializing replay memory")
    done = True
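
As a quick sanity check (not part of the original script), a multiplicative decay of 0.9995 per decay step takes roughly log(0.1 / 0.999) / log(0.9995) ≈ 4,600 steps to bring the random action probability from its starting value down to the 0.1 floor:

import math

start, end, decay = 0.999, 0.1, 0.9995   # values from the script above
print(round(math.log(end / start) / math.log(decay)))  # 4602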
Example no. 5
class DQNAgent:

    def __init__(self, epsilon, min_epsilon, decay_rate, learning_rate, tau, gamma, batch_size,
                 q_network, target_network, max_memory_length, agent_index=None):
        self.experience_memory = deque(maxlen=max_memory_length)
        self.prioritized_memory = PrioritizedReplayMemory(max_length=max_memory_length, alpha=0.6,
                                                          beta=0.4, beta_annealing_steps=500000)
        self.last_observation = None
        self.last_action = None
        self.agent_index = agent_index
        self.q_network = q_network
        self.target_network = target_network
        # epsilon is the probability of taking a random action
        self.epsilon = epsilon
        # lowest value epsilon is allowed to reach during training
        self.min_epsilon = min_epsilon
        # rate at which epsilon decays each episode
        self.decay_rate = decay_rate
        self.learning_rate = learning_rate
        # gamma is the discount factor
        self.gamma = gamma
        # tau is the weighting of the regular Q-network parameters when soft-updating the target
        # network. Tau = 1.0 is effectively no longer double DQN, since it just copies everything
        # as-is from the regular Q-network into the target network.
        self.tau = tau
        self.batch_size = batch_size
        self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self.learning_rate)
        self.criterion = nn.MSELoss()
        # Huber loss reduces sensitivity to outliers
        # self.criterion = nn.SmoothL1Loss()
        self.loss_history = []
        self.total_training_episodes = 0

    def decay_epsilon(self):
        # enforce a minimum epsilon during training
        self.epsilon = max(self.epsilon*self.decay_rate, self.min_epsilon)

    def policy(self, observation, done):
        """
        Using e-greedy exploration.
        Take a random action with probability epsilon,
        otherwise take the action with the highest value given the current state (according to the Q-network)
        :return: Discrete action, int in range 0-4 inclusive
        """
        observation = torch.tensor(observation, dtype=torch.float32)
        if done:
            return None
        elif random.random() <= self.epsilon:
            action = random.randint(0, 4)
        else:
            # Feed forward the q network and take the action with highest q value
            self.q_network.eval()
            with torch.no_grad():
                qs = self.q_network(observation)
                action = np.argmax(qs.detach().numpy())
        self.q_network.train()
        return action

    def save_model(self, filename):
        print("Saving Q network...")
        if not os.path.isdir('checkpoints'):
            os.mkdir('checkpoints')
        network_state = {
            'net': self.q_network.state_dict(),
            'target': self.target_network.state_dict(),
            'epsilon': self.epsilon,
            'total_training_episodes': self.total_training_episodes
        }
        torch.save(network_state, f'./checkpoints/{filename}.pth')
        print("Save complete!")

    def load_model(self, filename):
        print("Loading model from checkpoint...")
        checkpoint = torch.load(f'./checkpoints/{filename}.pth')  # load checkpoint
        self.q_network.load_state_dict(checkpoint['net'])
        self.target_network.load_state_dict(checkpoint['target'])
        self.epsilon = checkpoint['epsilon']
        self.total_training_episodes = checkpoint['total_training_episodes']
        print("Load complete!")

    def push_memory(self, memory):
        """Push a transition memory object onto the experience deque"""
        assert (isinstance(memory, TransitionMemory))
        self.experience_memory.append(memory)

    def do_training_update(self):
        if self.batch_size == 0 or len(self.experience_memory) < self.batch_size:
            return
        # Sample experience
        states, actions, rewards, next_states, dones = self.sample_random_experience(n=self.batch_size)
        # Get q values for the current state
        current_q = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        next_q = self.target_network(next_states).detach()
        max_next_q = next_q.max(1)[0].unsqueeze(1)
        assert (rewards.size() == max_next_q.size())
        assert (dones.size() == max_next_q.size())
        target_q = rewards + (1 - dones) * self.gamma * max_next_q
        target_q = target_q.detach()
        target_q = target_q.squeeze()
        assert (current_q.size() == target_q.size())
        loss = self.criterion(current_q, target_q)
        self.optimizer.zero_grad()
        self.loss_history.append(loss.item())
        loss.backward()
        self.optimizer.step()

    def sample_random_experience(self, n):
        """
        Randomly sample n transitions from the experience replay memory into a batch for training
        :param n: number of transition experiences to randomly sample
        :return: tuple of tensor batches of each TransitionMemory attribute
        """
        states, actions, rewards, next_states, dones = [], [], [], [], []
        experience_sample = random.sample(self.experience_memory, n)
        for memory in experience_sample:
            states.append(memory.state)
            actions.append(memory.action)
            rewards.append([memory.reward])
            next_states.append(memory.next_state)
            dones.append([memory.done])

        return (torch.tensor(states, dtype=torch.float32),
                torch.tensor(actions, dtype=torch.long),
                torch.tensor(rewards, dtype=torch.float32),
                torch.tensor(next_states, dtype=torch.float32),
                torch.tensor(dones, dtype=torch.int8))

    def do_prioritized_training_update(self, frame):
        if self.batch_size == 0 or len(self.prioritized_memory.memory) < self.batch_size:
            return
        # Sample prioritized experience
        states, actions, rewards, next_states, dones, importance_sampling_weights, selected_indices = self.prioritized_memory.sample(self.batch_size)
        # Get q values for the current state
        current_q = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        next_q = self.target_network(next_states).detach()
        max_next_q = next_q.max(1)[0].unsqueeze(1)
        assert (rewards.size() == max_next_q.size())
        assert (dones.size() == max_next_q.size())
        target_q = rewards + (1 - dones) * self.gamma * max_next_q
        target_q = target_q.detach()
        target_q = target_q.squeeze()
        assert (current_q.size() == target_q.size())
        loss = (current_q - target_q).pow(2)
        assert (loss.size() == importance_sampling_weights.size())
        # Multiply the TD errors by the importance sampling weights
        loss = loss * importance_sampling_weights
        new_priorities = loss + 0.00001
        loss = torch.mean(loss)
        self.optimizer.zero_grad()
        self.loss_history.append(loss.item())
        loss.backward()
        self.optimizer.step()
        # Update priorities for the indices selected in the batch
        self.prioritized_memory.update_priorities(selected_indices, new_priorities.detach().numpy())
        self.prioritized_memory.anneal_beta(frame)

    def update_target_network(self):
        for source_parameters, target_parameters in zip(self.q_network.parameters(), self.target_network.parameters()):
            target_parameters.data.copy_(self.tau * source_parameters.data + (1.0 - self.tau) * target_parameters.data)
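
A hypothetical end-to-end sketch of training with the DQNAgent above via the uniform replay path. The Q-networks, the environment step, and the TransitionMemory class are stand-ins: the agent only needs two networks with the same architecture and output size, and a transition object exposing .state, .action, .reward, .next_state and .done. None of this is from the original code.

import collections

import numpy as np
import torch.nn as nn

# Stand-in transition container matching the attribute names the agent reads.
TransitionMemory = collections.namedtuple(
    'TransitionMemory', ['state', 'action', 'reward', 'next_state', 'done'])


def make_network(obs_size=10, n_actions=5):
    # Placeholder Q-network; the agent only requires a callable torch module.
    return nn.Sequential(nn.Linear(obs_size, 64), nn.ReLU(), nn.Linear(64, n_actions))


q_net, target_net = make_network(), make_network()
agent = DQNAgent(epsilon=1.0, min_epsilon=0.05, decay_rate=0.995, learning_rate=1e-3,
                 tau=0.01, gamma=0.99, batch_size=32, q_network=q_net,
                 target_network=target_net, max_memory_length=100000)

for episode in range(10):
    obs = np.random.rand(10).astype(np.float32)   # stand-in for env.reset()
    for t in range(200):
        action = agent.policy(obs, done=False)
        # env.step(action) would go here; random placeholders keep the sketch runnable.
        next_obs = np.random.rand(10).astype(np.float32)
        reward = float(np.random.rand())
        done = bool(np.random.rand() < 0.05)
        agent.push_memory(TransitionMemory(obs, action, reward, next_obs, done))
        agent.do_training_update()
        obs = next_obs
        if done:
            break
    agent.update_target_network()
    agent.decay_epsilon()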