# Common imports assumed by the examples below (the original listing omits
# them). Project-specific names such as DQNModel, ReplayBuffer, ACTION_DICT,
# CartPoleEnv, Memory_Server, hyperparams_CartPole, result_floder and
# result_file come from the surrounding project and are not defined here.
import torch
import numpy as np
import ray
from collections import deque
from random import uniform, randint
from tqdm import tqdm

FloatTensor = torch.FloatTensor  # assumption: CPU tensors


class Model_Server(object):
    def __init__(self, env, hyper_params, memory_server):
        """
            input_len The input length of the neural network. It equals to the length of the state vector.
            output_len: The output length of the neural network. It is equal to the action space.
            eval_model: The model for predicting action for the agent.
            target_model: The model for calculating Q-value of next_state to update 'eval_model'.
            use_target_model: Trigger for turn 'target_model' on/off
        """
        self.beta = hyper_params['beta']

        state = env.reset()
        action_space = len(ACTION_DICT)
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len,
                                   output_len,
                                   learning_rate=hyper_params['learning_rate'])
        self.use_target_model = hyper_params['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)

        self.memory_server = memory_server

    def update_batch(self, batch_size):

        batch = ray.get(self.memory_server.sample.remote(batch_size))

        (states, actions, reward, next_states, is_terminal) = batch

        # Mask that is 0 for terminal transitions and 1 otherwise, so the
        # bootstrap term is zeroed at episode ends.
        non_terminal = FloatTensor([0 if t else 1 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            best_actions, q_next = self.target_model.predict_batch(next_states)
        else:
            best_actions, q_next = self.eval_model.predict_batch(next_states)
        q_next = q_next[batch_index, best_actions]

        # TD target: reward + beta * max_a' Q(next_state, a'), with the
        # bootstrap term masked out for terminal transitions.
        q_max = q_next * non_terminal
        q_target = reward + self.beta * q_max
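        # For example, with reward=1.0, beta=0.99 and max Q(next_state)=10.0,
        # the target is 1.0 + 0.99 * 10.0 = 10.9; for a terminal transition
        # the bootstrap term is masked to 0 and the target is just 1.0.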

        # update model
        self.eval_model.fit(q_values, q_target)

    def replace_target(self):
        self.target_model.replace(self.eval_model)

    def greedy_policy(self, state):
        return self.eval_model.predict(state)
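
# A minimal usage sketch for the Model_Server above. It assumes (not shown in
# the original) that Model_Server and the memory server are registered as Ray
# actors, e.g. via @ray.remote, and that 'env' and 'hyper_params' exist; every
# name and constant below is illustrative, not part of the original code.
#
#   model_server = Model_Server.remote(env, hyper_params, memory_server)
#   for step in range(1, 10001):
#       ray.get(model_server.update_batch.remote(hyper_params['batch_size']))
#       if step % hyper_params['model_replace_freq'] == 0:
#           ray.get(model_server.replace_target.remote())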
# Example 2
class DQN_Model_Server():
    def __init__(self, env, hyper_params, batch_size, update_steps, memory_size, beta, model_replace_freq,
                 learning_rate, use_target_model=True, memory=Memory_Server, action_space=2,
                 training_episodes=7000, test_interval=50):
        self.batch_size = batch_size
        self.update_steps = update_steps

        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len, output_len, learning_rate=learning_rate)
        self.target_model = DQNModel(input_len, output_len)
        self.steps = 0
        self.memory = memory
        # self.memory = ReplayBuffer(hyper_params['memory_size'])
        self.prev = 0
        self.next = 0
        self.model_dq = deque()
        self.result = [0] * ((training_episodes // test_interval) + 1)
        self.previous_q_networks = []
        self.result_count = 0
        self.learning_episodes = training_episodes
        self.episode = 0
        self.is_collection_completed = False
        self.evaluator_done = False
        self.batch_num = training_episodes // test_interval
        self.use_target_model = use_target_model
        self.beta = beta
        self.test_interval = test_interval


    def get_evaluation_model(self):

        if self.episode >= self.learning_episodes:
            self.is_collection_completed = True

        return self.is_collection_completed


    def replace(self):
        self.target_model.replace(self.eval_model)

    def get_total_steps(self):
        return self.steps


    def predict_next(self, state, e_model):
        return e_model.predict(state)

    def get_predict(self, state):
        return self.eval_model.predict(state)

    def set_collect_count(self):
        self.next += 1

    def set_collector_count(self):
        self.episode += 1


    def get_evaluation_count(self):
        return self.result_count

    def get_evaluator_count(self):
        return self.episode

    def ask_evaluation(self):
        if len(self.previous_q_networks) > self.result_count:
            num = self.result_count
            evaluation_q_network = self.previous_q_networks[num]
            self.result_count += 1
            self.episode += self.test_interval
            return evaluation_q_network, False, num
        else:
            if self.episode >= self.learning_episodes:
                self.evaluator_done = True
            return [], self.evaluator_done, None

    def update_batch(self):

        self.steps += self.update_steps

        if ray.get(self.memory.__len__.remote()) < self.batch_size:
            return

        if self.is_collection_completed:
            return

        batch = ray.get(self.memory.sample.remote(self.batch_size))

        (states, actions, reward, next_states, is_terminal) = batch

        # 1 for terminal transitions, 0 otherwise.
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size,
                                   dtype=torch.long)

        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        if self.use_target_model:
            _, q_next = self.target_model.predict_batch(next_states)
        else:
            _, q_next = self.eval_model.predict_batch(next_states)

        # TD targets: reward for terminal transitions, otherwise
        # reward + beta * max_a' Q(next_state, a').
        q_max = torch.max(q_next, 1).values
        q_targets = []
        for i in range(len(terminal)):
            if terminal[i] == 1:
                q_targets.append(reward[i])
            else:
                q_targets.append(reward[i] + self.beta * q_max[i])

        q_target = FloatTensor(q_targets)

        self.eval_model.fit(q_values, q_target)

        if self.episode // self.test_interval + 1 > len(self.previous_q_networks):
            model_id = ray.put(self.eval_model)
            self.previous_q_networks.append(model_id)
        return self.steps

    def set_results(self, result, num):
        self.result[num] = result

    def get_results(self):
        return self.result
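
# A sketch of the evaluator-side polling loop that ask_evaluation above is
# written for (hypothetical: the driver loop and the 'evaluate_model' helper
# are assumptions, not part of the original code):
#
#   while True:
#       model, done, num = ray.get(server.ask_evaluation.remote())
#       if done:
#           break
#       if num is not None:
#           avg_reward = evaluate_model(model)  # user-supplied evaluation
#           server.set_results.remote(avg_reward, num)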
# Example 3
class DQN_server(object):
    def __init__(self,
                 learning_rate,
                 training_episodes,
                 memory,
                 env,
                 test_interval=50,
                 batch_size=32,
                 action_space=len(ACTION_DICT),
                 beta=0.99):

        self.env = env
        #self.max_episode_steps = env._max_episode_steps

        self.batch_num = training_episodes // test_interval
        self.steps = 0

        self.collector_done = False
        self.evaluator_done = False
        self.training_episodes = training_episodes
        self.episode = 0
        self.batch_size = batch_size
        self.previous_q_models = []
        self.results = [0] * (self.batch_num + 1)
        self.result_count = 0
        self.memory = memory
        self.use_target_model = True

        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len, output_len, learning_rate=learning_rate)
        self.target_model = DQNModel(input_len, output_len)

        self.beta = beta
        self.test_interval = test_interval
        # 'update_steps' and 'model_replace_freq' were read from an undefined
        # 'hyper_params' dict in the original; the defaults below are assumptions.
        self.update_steps = 10
        self.model_replace_freq = 2000

    def get_eval_model(self):
        print(self.episode)
        if self.episode >= self.training_episodes:
            self.collector_done = True

        return self.collector_done

    def add_episode(self):
        self.episode += 1
        return self.episode

    def update_batch(self):
        if self.collector_done:
            return
        if ray.get(self.memory.__len__.remote()
                   ) < self.batch_size or self.steps % self.update_steps != 0:
            return

        batch = ray.get(self.memory.sample.remote(self.batch_size))

        (states, actions, reward, next_states, is_terminal) = batch

        self.steps += self.update_steps
        # 1 for terminal transitions, 0 otherwise.
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            _, q_next = self.target_model.predict_batch(next_states)
        else:
            _, q_next = self.eval_model.predict_batch(next_states)

        # TD targets: reward for terminal transitions, otherwise
        # reward + beta * max_a' Q(next_state, a').
        q_max = torch.max(q_next, dim=1).values
        q_targets = [0] * self.batch_size
        for i in range(self.batch_size):
            if terminal[i] == 1:
                q_targets[i] = reward[i]
            else:
                q_targets[i] = reward[i] + self.beta * q_max[i]

        q_target = FloatTensor(q_targets)

        # update model
        self.eval_model.fit(q_values, q_target)

        if self.episode // self.test_interval + 1 > len(self.previous_q_models):
            model_id = ray.put(self.eval_model)
            self.previous_q_models.append(model_id)
        return self.steps

    # evaluator
    def add_result(self, result, num):
        #print(num)
        self.results[num] = result

    def get_results(self):
        return self.results

    def ask_evaluation(self):
        if len(self.previous_q_models) > self.result_count:
            num = self.result_count
            evaluation_q_model = self.previous_q_models[num]
            self.result_count += 1
            return evaluation_q_model, False, num
        else:
            if self.episode >= self.training_episodes:
                self.evaluator_done = True
            return [], self.evaluator_done, None

    def replace(self):
        self.target_model.replace(self.eval_model)

    def predict(self, state):
        return self.eval_model.predict(state)
class DQN_server():
    def __init__(self, env, hyper_params, action_space):

        #self.env = env
        #self.max_episode_steps = env._max_episode_steps
        """
            beta: The discounted factor of Q-value function
            (epsilon): The explore or exploit policy epsilon.
            initial_epsilon: When the 'steps' is 0, the epsilon is initial_epsilon, 1
            final_epsilon: After the number of 'steps' reach 'epsilon_decay_steps',
                The epsilon set to the 'final_epsilon' determinately.
            epsilon_decay_steps: The epsilon will decrease linearly along with the steps from 0 to 'epsilon_decay_steps'.
        """
        self.beta = hyper_params['beta']
        """
            episode: Record training episode
            steps: Add 1 when predicting an action
            learning: The trigger of agent learning. It is on while training agent. It is off while testing agent.
            action_space: The action space of the current environment, e.g 2.
        """
        # self.episode = 0
        # self.steps = 0
        # self.best_reward = 0
        # self.learning = True
        # self.action_space = action_space
        """
            input_len The input length of the neural network. It equals to the length of the state vector.
            output_len: The output length of the neural network. It is equal to the action space.
            eval_model: The model for predicting action for the agent.
            target_model: The model for calculating Q-value of next_state to update 'eval_model'.
            use_target_model: Trigger for turn 'target_model' on/off
        """
        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len,
                                   output_len,
                                   learning_rate=hyper_params['learning_rate'])
        self.use_target_model = hyper_params['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)


        # memory: Store and sample experience replay.
        # self.memory = ReplayBuffer(hyper_params['memory_size'])
        """
            batch_size: Mini-batch size for training the model.
            update_steps: The frequency of training the model.
            model_replace_freq: The frequency of replacing 'target_model' with 'eval_model'.
        """
        self.batch_size = hyper_params['batch_size']
        #self.update_steps = hyper_params['update_steps']
        #self.model_replace_freq = hyper_params['model_replace_freq']

        print("server initialized")

    def replace_target_model(self):
        self.target_model.replace(self.eval_model)

    def eval_model_predict(self, state):
        return self.eval_model.predict(state)

    # This function is called in the main RL loop to update the neural network model given a batch of experience:
    # 1) Sample a 'batch_size' batch of experiences from the memory.
    # 2) Predict the Q-values from the 'eval_model' based on (states, actions).
    # 3) Predict the Q-values from the 'target_model' based on (next_states), and take the max of each Q-value vector, Q_max.
    # 4) If is_terminal == 1, q_target = reward; otherwise, q_target = reward + discount factor * Q_max.
    # 5) Call fit() to back-propagate through 'eval_model'.
    def update_batch(self, memory):
        current_memory_size = memory.get_current_size()
        if current_memory_size < self.batch_size:
            return

        #print("fetching minibatch from replay memory")
        batch = memory.sample(self.batch_size)

        (states, actions, reward, next_states, is_terminal) = batch

        states = states
        next_states = next_states
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            best_actions, q_next = self.target_model.predict_batch(next_states)
        else:
            best_actions, q_next = self.eval_model.predict_batch(next_states)

        q_max = q_next[batch_index, best_actions]

        # Zero out the bootstrap term for terminal transitions.
        non_terminal = 1 - terminal
        q_max *= non_terminal
        q_target = reward + self.beta * q_max

        # update model
        self.eval_model.fit(q_values, q_target)

    # save model
    def save_model(self):
        self.eval_model.save(result_floder + '/best_model.pt')

    # load model
    def load_model(self):
        self.eval_model.load(result_floder + '/best_model.pt')
class DQN_agent(object):
    def __init__(self, env, hyper_params, action_space=len(ACTION_DICT)):

        self.env = env
        self.max_episode_steps = env._max_episode_steps
        """
            beta: The discounted factor of Q-value function
            (epsilon): The explore or exploit policy epsilon.
            initial_epsilon: When the 'steps' is 0, the epsilon is initial_epsilon, 1
            final_epsilon: After the number of 'steps' reach 'epsilon_decay_steps',
                The epsilon set to the 'final_epsilon' determinately.
            epsilon_decay_steps: The epsilon will decrease linearly along with the steps from 0 to 'epsilon_decay_steps'.
        """
        self.beta = hyper_params['beta']
        self.initial_epsilon = 1
        self.final_epsilon = hyper_params['final_epsilon']
        self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
        """
            episode: Record training episode
            steps: Add 1 when predicting an action
            learning: The trigger of agent learning. It is on while training agent. It is off while testing agent.
            action_space: The action space of the current environment, e.g 2.
        """
        self.episode = 0
        self.steps = 0
        self.best_reward = 0
        self.learning = True
        self.action_space = action_space
        """
            input_len The input length of the neural network. It equals to the length of the state vector.
            output_len: The output length of the neural network. It is equal to the action space.
            eval_model: The model for predicting action for the agent.
            target_model: The model for calculating Q-value of next_state to update 'eval_model'.
            use_target_model: Trigger for turn 'target_model' on/off
        """
        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len,
                                   output_len,
                                   learning_rate=hyper_params['learning_rate'])
        self.use_target_model = hyper_params['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)
        # memory: Store and sample experience replay.
        self.memory = ReplayBuffer(hyper_params['memory_size'])
        """
            batch_size: Mini batch size for training model.
            update_steps: The frequence of traning model
            model_replace_freq: The frequence of replacing 'target_model' by 'eval_model'
        """
        self.batch_size = hyper_params['batch_size']
        self.update_steps = hyper_params['update_steps']
        self.model_replace_freq = hyper_params['model_replace_freq']

        print("agent initialized")

    # Linear decrease function for epsilon
    def linear_decrease(self, initial_value, final_value, curr_steps,
                        final_decay_steps):
        decay_rate = curr_steps / final_decay_steps
        if decay_rate > 1:
            decay_rate = 1
        return initial_value - (initial_value - final_value) * decay_rate
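    # For example, with initial_value=1.0, final_value=0.1 and
    # final_decay_steps=100000, this returns 1.0 at step 0,
    # 1.0 - 0.9 * 0.5 = 0.55 at step 50000, and 0.1 for any
    # curr_steps >= 100000.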

    def explore_or_exploit_policy(self, state):
        p = uniform(0, 1)
        # Get decreased epsilon
        epsilon = self.linear_decrease(self.initial_epsilon,
                                       self.final_epsilon, self.steps,
                                       self.epsilon_decay_steps)
        if p < epsilon:
            # Explore: pick a uniformly random action.
            return randint(0, self.action_space - 1)
        else:
            # Exploit: pick the greedy action from the evaluation model.
            return self.greedy_policy(state)

    def greedy_policy(self, state):
        return self.eval_model.predict(state)

    # This function is called in the main RL loop to update the neural network model given a batch of experience:
    # 1) Sample a 'batch_size' batch of experiences from the memory.
    # 2) Predict the Q-values from the 'eval_model' based on (states, actions).
    # 3) Predict the Q-values from the 'target_model' based on (next_states), and take the max of each Q-value vector, Q_max.
    # 4) If is_terminal == 1, q_target = reward; otherwise, q_target = reward + discount factor * Q_max.
    # 5) Call fit() to back-propagate through 'eval_model'.
    def update_batch(self):
        if len(self.memory
               ) < self.batch_size or self.steps % self.update_steps != 0:
            return

        #print("fetching minibatch from replay memory")
        batch = self.memory.sample(self.batch_size)

        (states, actions, reward, next_states, is_terminal) = batch

        states = states
        next_states = next_states
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            best_actions, q_next = self.target_model.predict_batch(next_states)
        else:
            best_actions, q_next = self.eval_model.predict_batch(next_states)

        q_max = q_next[batch_index, best_actions]

        # Zero out the bootstrap term for terminal transitions.
        non_terminal = 1 - terminal
        q_max *= non_terminal
        q_target = reward + self.beta * q_max

        # update model
        self.eval_model.fit(q_values, q_target)

    def learn_and_evaluate(self, training_episodes, test_interval):
        test_number = training_episodes // test_interval
        all_results = []

        for i in range(test_number):
            # learn
            self.learn(test_interval)

            # evaluate
            avg_reward = self.evaluate()
            all_results.append(avg_reward)

        return all_results

    def learn(self, test_interval):
        for episode in tqdm(range(test_interval), desc="Training"):
            state = self.env.reset()
            done = False
            steps = 0

            while steps < self.max_episode_steps and not done:
                # Collect experience with the explore-or-exploit policy
                # and add it to the replay memory.
                action = self.explore_or_exploit_policy(state)
                next_state, reward, done, info = self.env.step(action)
                self.memory.add(state, action, reward, next_state, done)

                # update the model every 'update_steps' of experience
                self.update_batch()

                # update the target network (if the target network is being used) every 'model_replace_freq' of experiences
                if self.use_target_model and (self.steps %
                                              self.model_replace_freq == 0):
                    self.target_model.replace(self.eval_model)

                self.steps += 1
                steps += 1
                state = next_state

    def evaluate(self, trials=30):
        total_reward = 0
        for _ in tqdm(range(trials), desc="Evaluating"):
            state = self.env.reset()
            done = False
            steps = 0

            while steps < self.max_episode_steps and not done:
                steps += 1
                action = self.greedy_policy(state)
                state, reward, done, _ = self.env.step(action)
                total_reward += reward

        avg_reward = total_reward / trials
        print(avg_reward)
        with open(result_file, "a+") as f:
            f.write(str(avg_reward) + "\n")
        if avg_reward >= self.best_reward:
            self.best_reward = avg_reward
            self.save_model()
        return avg_reward

    # save model
    def save_model(self):
        self.eval_model.save(result_floder + '/best_model.pt')

    # load model
    def load_model(self):
        self.eval_model.load(result_floder + '/best_model.pt')
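
# A minimal usage sketch for the DQN_agent above. The dict keys are exactly
# those the constructor reads; the numeric values are illustrative assumptions.
#
#   hyper_params = {
#       'beta': 0.99, 'learning_rate': 0.0003,
#       'final_epsilon': 0.1, 'epsilon_decay_steps': 100000,
#       'use_target_model': True, 'memory_size': 50000,
#       'batch_size': 32, 'update_steps': 10, 'model_replace_freq': 2000,
#   }
#   agent = DQN_agent(env, hyper_params)
#   results = agent.learn_and_evaluate(training_episodes=10000, test_interval=50)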
class Model_Server():
    def __init__(self, env, hyper_params, memory, action_space):
        self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
        self.final_epsilon = hyper_params['final_epsilon']
        self.batch_size = hyper_params['batch_size']
        self.update_steps = hyper_params['update_steps']
        self.beta = hyper_params['beta']
        self.model_replace_freq = hyper_params['model_replace_freq']
        self.learning_rate = hyper_params['learning_rate']
        self.training_episodes = hyper_params['training_episodes']
        self.test_interval = hyper_params['test_interval']
        self.memory = memory

        self.episode = 0
        self.steps = 0
        self.result_count = 0
        self.next = 0
        self.batch_num = self.training_episodes // self.test_interval

        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len, output_len, learning_rate=hyper_params['learning_rate'])
        self.target_model = DQNModel(input_len, output_len)

        self.results = [0] * (self.batch_num + 1)
        self.previous_q_networks = []

        self.collector_done = False
        self.evaluator_done = False

    def ask_evaluation(self):
        if len(self.previous_q_networks) > self.result_count:
            num = self.result_count
            evaluation_q_network = self.previous_q_networks[num]
            self.result_count += 1
            return evaluation_q_network, False, num
        else:
            if self.episode >= self.training_episodes:
                self.evaluator_done = True
            return [], self.evaluator_done, None

    def get_evaluation_model(self):
        if self.episode >= self.training_episodes:
            self.collector_done = True
        return self.eval_model, self.collector_done

    def replace_with_eval_model(self):
        self.target_model.replace(self.eval_model)

    def get_model_steps(self):
        return self.steps

    def predict_next_eval(self, state, eval_model):
        return eval_model.predict(state)

    def get_predict(self, state):
        return self.eval_model.predict(state)

    def increment_episode(self):
        self.episode += 1

    def increment_model_steps(self):
        self.steps += 1
        return self.steps

    def update_batch(self):

        self.steps += self.update_steps

        if ray.get(self.memory.__len__.remote()) < self.batch_size:
            return

        if self.collector_done:
            return

        batch = ray.get(self.memory.sample.remote(self.batch_size))

        (states, actions, reward, next_states, is_terminal) = batch

        # 1 for terminal transitions, 0 otherwise.
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target with the target network
        _, q_next = self.target_model.predict_batch(next_states)

        q_max, _ = torch.max(q_next, dim=1)

        # TD targets: reward for terminal transitions, otherwise
        # reward + beta * max_a' Q(next_state, a').
        q_targets = []
        for i, is_term in enumerate(terminal):
            if is_term == 1:
                q_targets.append(reward[i])
            else:
                q_targets.append(reward[i] + self.beta * q_max[i])
        q_targets_tensor = FloatTensor(q_targets)

        # update model
        self.eval_model.fit(q_values, q_targets_tensor)

        if self.episode // self.test_interval + 1 > len(self.previous_q_networks):
            model_id = ray.put(self.eval_model)
            self.previous_q_networks.append(model_id)
        return self.steps

    def add_result(self, reward, num):
        self.results[num] = reward

    def get_results(self):
        return self.results
# Example 7
class DQN_agent(object):
    def __init__(self, env, hyper_params, action_space=len(ACTION_DICT)):

        self.env = env
        self.max_episode_steps = env._max_episode_steps

        self.beta = hyper_params['beta']
        self.initial_epsilon = 1
        self.final_epsilon = hyper_params['final_epsilon']
        self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']

        self.episode = 0
        self.steps = 0
        self.best_reward = 0
        self.learning = True
        self.action_space = action_space

        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len,
                                   output_len,
                                   learning_rate=hyper_params['learning_rate'])
        self.use_target_model = hyper_params['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)

        self.memory = ReplayBuffer(hyper_params['memory_size'])

        self.batch_size = hyper_params['batch_size']
        self.update_steps = hyper_params['update_steps']
        self.model_replace_freq = hyper_params['model_replace_freq']

    # Linear decrease function for epsilon
    def linear_decrease(self, initial_value, final_value, curr_steps,
                        final_decay_steps):
        decay_rate = curr_steps / final_decay_steps
        if decay_rate > 1:
            decay_rate = 1
        return initial_value - (initial_value - final_value) * decay_rate

    def explore_or_exploit_policy(self, state):
        p = uniform(0, 1)
        # Get decreased epsilon
        epsilon = self.linear_decrease(self.initial_epsilon,
                                       self.final_epsilon, self.steps,
                                       self.epsilon_decay_steps)

        if p < epsilon:
            # Explore: pick a uniformly random action.
            return randint(0, self.action_space - 1)
        else:
            # Exploit: pick the greedy action from the evaluation model.
            return self.greedy_policy(state)

    def greedy_policy(self, state):
        return self.eval_model.predict(state)

    def update_batch(self):
        if len(self.memory
               ) < self.batch_size or self.steps % self.update_steps != 0:
            return
        # 1) Sample a 'batch_size' batch of experiences from the memory.
        batch = self.memory.sample(self.batch_size)

        (states, actions, reward, next_states, is_terminal) = batch

        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values --- 2) Predict the Q-value from the 'eval_model' based on (states, actions)
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target --- 3) Predict the Q-value from the 'target model' based on (next_states), and take max of each Q-value vector, Q_max
        if self.use_target_model:
            best_actions, q_next = self.target_model.predict_batch(next_states)
        else:
            best_actions, q_next = self.eval_model.predict_batch(next_states)

        q_next = q_next[batch_index, best_actions]
        q_target = FloatTensor([
            reward[index] if is_terminal[index] else reward[index] +
            self.beta * q_next[index] for index in range(self.batch_size)
        ])

        # update model
        self.eval_model.fit(q_values, q_target)

    def learn_and_evaluate(self, training_episodes, test_interval):
        test_number = training_episodes // test_interval
        all_results = []

        for i in range(test_number):
            # learn
            self.learn(test_interval)

            # evaluate
            avg_reward = self.evaluate()
            all_results.append(avg_reward)

        return all_results

    def learn(self, test_interval):
        for episode in tqdm(range(test_interval), desc="Training"):
            state = self.env.reset()
            done = False
            steps = 0

            while steps < self.max_episode_steps and not done:

                action = self.explore_or_exploit_policy(state)
                next_state, reward, done, _ = self.env.step(action)
                # Store history
                self.memory.add(state, action, reward, next_state, done)
                # Update the model
                if self.steps % self.update_steps == 0:
                    self.update_batch()
                # Update the target network if DQN uses it
                if self.use_target_model:
                    if self.steps % self.model_replace_freq == 0:
                        self.target_model.replace(self.eval_model)
                # Update information for the next loop
                state = next_state
                steps += 1
                self.steps += 1

    def evaluate(self, trials=30):
        total_reward = 0
        for _ in tqdm(range(trials), desc="Evaluating"):
            state = self.env.reset()
            done = False
            steps = 0

            while steps < self.max_episode_steps and not done:
                steps += 1
                action = self.greedy_policy(state)
                state, reward, done, _ = self.env.step(action)
                total_reward += reward

        avg_reward = total_reward / trials
        print(avg_reward)
        with open(result_file, "a+") as f:
            f.write(str(avg_reward) + "\n")
        if avg_reward >= self.best_reward:
            self.best_reward = avg_reward
            self.save_model()
        return avg_reward

    # save model
    def save_model(self):
        self.eval_model.save(result_floder + '/best_model.pt')

    # load model
    def load_model(self):
        self.eval_model.load(result_floder + '/best_model.pt')
# Example 8
class ModelServer():
    def __init__(self,
                 hyper_params,
                 memory_server,
                 nb_agents,
                 nb_evaluators,
                 action_space=len(ACTION_DICT)):
        self.beta = hyper_params['beta']
        self.initial_epsilon = 1
        self.final_epsilon = hyper_params['final_epsilon']
        self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
        self.hyper_params = hyper_params
        self.update_steps = hyper_params['update_steps']
        self.model_replace_freq = hyper_params['model_replace_freq']
        self.action_space = action_space
        self.batch_size = hyper_params['batch_size']
        self.memory_server = memory_server
        self.nb_agents = nb_agents
        self.nb_evaluators = nb_evaluators
        env = CartPoleEnv()
        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len,
                                   output_len,
                                   learning_rate=hyper_params['learning_rate'])
        self.target_model = DQNModel(input_len, output_len)

        self.agents = [
            DQN_agent_remote.remote(CartPoleEnv(), memory_server, hyper_params,
                                    action_space, i) for i in range(nb_agents)
        ]
        self.evaluators = [
            EvalWorker.remote(self.eval_model, CartPoleEnv(),
                              hyper_params['max_episode_steps'],
                              hyper_params['eval_trials'], i)
            for i in range(nb_evaluators)
        ]

    # Linear decrease function for epsilon
    def linear_decrease(self, initial_value, final_value, curr_steps,
                        final_decay_steps):
        decay_rate = curr_steps / final_decay_steps
        if decay_rate > 1:
            decay_rate = 1
        return initial_value - (initial_value - final_value) * decay_rate

    def update_batch(self):
        batch = self.memory_server.sample.remote(self.batch_size)
        (states, actions, reward, next_states, is_terminal) = ray.get(batch)
        if len(states) < self.batch_size:
            return
        nonterminal_x_beta = FloatTensor(
            [0 if t else self.beta for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)
        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]
        # Calculate target
        actions, q_next = self.target_model.predict_batch(next_states)
        q_targets = reward + nonterminal_x_beta * torch.max(q_next, 1).values
        # update model
        self.eval_model.fit(q_values, q_targets)

    def learn(self, test_interval, epsilon):
        # determine which collectors are idle
        ready_ids, _ = ray.wait(
            [agent.pingback.remote() for agent in self.agents], num_returns=1)
        ready_agents = ray.get(ready_ids)
        # send eval model to idle collectors, initiate collection
        for agent_id in ready_agents:
            self.agents[agent_id].collect.remote(self.eval_model,
                                                 test_interval, epsilon)

    def evaluate(self, all_results):
        # determine which evaluators are idle
        ready_ids, _ = ray.wait(
            [evaluator.pingback.remote() for evaluator in self.evaluators],
            num_returns=1)
        ready_evaluators = ray.get(ready_ids)
        # send eval model to idle evaluators, get results
        for evaluator_id in ready_evaluators:
            avg_reward = ray.get(
                self.evaluators[evaluator_id].evaluate.remote())
            all_results.append(avg_reward)

    def learn_and_evaluate(self, training_episodes, test_interval):
        test_number = training_episodes // test_interval
        all_results = []
        for i in range(test_number):
            self.steps = i * test_interval
            # Get decreased epsilon
            epsilon = self.linear_decrease(self.initial_epsilon,
                                           self.final_epsilon, self.steps,
                                           self.epsilon_decay_steps)
            # send eval model to collectors, have them collect experience
            self.learn(test_interval, epsilon)
            # sample experience from memory server, perform batch update on eval model
            if self.steps % self.update_steps == 0:
                self.update_batch()
            # replace target model
            if self.steps % self.model_replace_freq == 0:
                self.target_model.replace(self.eval_model)
            # send eval model to evaluators, record results
            self.evaluate(all_results)
        return all_results
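
# A minimal driver sketch for the ModelServer above (illustrative; it assumes
# ray.init() has been called, a compatible memory-server actor exists, and
# 'hyper_params' carries the keys read above, including 'max_episode_steps'
# and 'eval_trials'):
#
#   server = ModelServer(hyper_params, memory_server,
#                        nb_agents=4, nb_evaluators=2)
#   results = server.learn_and_evaluate(training_episodes=10000,
#                                       test_interval=50)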
# Example 9
class DQN_model_server(object):
    def __init__(self, env, memory, action_space=2, test_interval=50):

        self.collector_done = False
        self.evaluator_done = False

        self.env = env
        # self.max_episode_steps = env._max_episode_steps
        self.max_episode_steps = 200

        self.beta = hyperparams_CartPole['beta']
        self.initial_epsilon = 1
        self.final_epsilon = hyperparams_CartPole['final_epsilon']
        self.epsilon_decay_steps = hyperparams_CartPole['epsilon_decay_steps']
        self.batch_size = hyperparams_CartPole['batch_size']

        self.episode = 0
        self.steps = 0
        self.best_reward = 0
        self.learning = True
        self.action_space = action_space

        self.previous_q_models = []
        # One slot per evaluation round; 'training_episodes' is the module-level
        # constant also used in ask_evaluation below. (The original sized this
        # list with batch_size, which appears to be a mistake.)
        self.results = [0] * ((training_episodes // test_interval) + 1)
        self.result_count = 0
        self.test_interval = test_interval
        self.memory = memory

        state = env.reset()
        input_len = len(state)
        output_len = action_space

        self.eval_model = DQNModel(input_len, output_len, learning_rate=hyperparams_CartPole['learning_rate'])

        self.use_target_model = hyperparams_CartPole['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)

        # memory: Store and sample experience replay.
        # self.memory = ReplayBuffer(hyper_params['memory_size'])

        self.update_steps = hyperparams_CartPole['update_steps']
        self.model_replace_freq = hyperparams_CartPole['model_replace_freq']


    def get_steps(self):
        return self.steps

    def update_batch(self):

        # Skip the update until the replay memory can fill a batch.
        if ray.get(self.memory.__len__.remote()) < self.batch_size:
            return

        batch = self.memory.sample.remote(self.batch_size)
        (states, actions, reward, next_states, is_terminal) = ray.get(batch)

        # 1 for terminal transitions, 0 otherwise.
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, self.q_values = self.eval_model.predict_batch(states)
        self.q_values = self.q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            best_actions, self.q_next = self.target_model.predict_batch(next_states)
        else:
            best_actions, self.q_next = self.eval_model.predict_batch(next_states)
        self.q_next = self.q_next[batch_index, best_actions]

        # TD targets: reward for terminal transitions, otherwise
        # reward + beta * Q(next_state, best_action) from the network above.
        self.q_target = []

        for i in range(len(reward)):
            if terminal[i] == 1:
                self.q_target.append(reward[i])
            else:
                self.q_target.append(reward[i] + self.beta * self.q_next[i])

        self.q_target = FloatTensor(self.q_target)

        # update model
        self.eval_model.fit(self.q_values, self.q_target)

        # Occasionally log a (q_value, q_target) pair for debugging.
        if np.random.randint(100) == 4:
            print("==========", self.q_values[0], self.q_target[0])

        if self.episode // self.test_interval + 1 > len(self.previous_q_models):
            model_id = ray.put(self.eval_model)
            self.previous_q_models.append(model_id)

        self.steps += self.update_steps
        return self.steps

    def greedy_policy(self, state):
        return self.eval_model.predict(state)

    def replace_target(self):
        return self.target_model.replace(self.eval_model)

    # evaluator
    def add_result(self, result, num):
        self.results[num] = result

    def get_results(self):
        return self.results


    def ask_evaluation(self):
        if len(self.previous_q_models) > self.result_count:
            num = self.result_count
            evaluation_q_model = self.previous_q_models[num]
            self.result_count += 1
            return evaluation_q_model, False, num
        else:
            if self.episode >= training_episodes:
                self.evaluator_done = True
            return [], self.evaluator_done, None

    def add_episode(self):

        self.episode += 1
class RLAgent_model_server():
    def __init__(self, env, hyper_params, memo_server):
        self.memory_server = memo_server
        self.env = env
        self.max_episode_steps = env._max_episode_steps

        self.beta = hyper_params['beta']
        self.training_episodes = hyper_params['training_episodes']
        self.test_interval = hyper_params['test_interval']

        action_space = len(ACTION_DICT)
        self.episode = 0
        self.steps = 0
        self.best_reward = 0
        self.learning = True
        self.action_space = action_space

        state = env.reset()
        input_len = len(state)
        output_len = action_space
        self.eval_model = DQNModel(input_len, output_len, learning_rate=hyper_params['learning_rate'])
        self.use_target_model = hyper_params['use_target_model']
        if self.use_target_model:
            self.target_model = DQNModel(input_len, output_len)

        self.batch_size = hyper_params['batch_size']
        self.update_steps = hyper_params['update_steps']
        self.model_replace_freq = hyper_params['model_replace_freq']
        self.collector_done = False
        self.results = []

        self.initial_epsilon = 1
        self.final_epsilon = hyper_params['final_epsilon']
        self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
        self.replace_target_cnt = 0
        self.epsilon = 1
        self.eval_models_seq = 1
        # Queue of (eval_model, is_done) pairs consumed by ask_evaluate below;
        # the original never initialized it.
        self.eval_models = []

    def update_batch(self):
        # Get memory sample
        batch = ray.get(self.memory_server.sample.remote(self.batch_size))
        if not batch:
            return
        (states, actions, reward, next_states, is_terminal) = batch

        # Torch tensors; the mask is 0 for terminal transitions, 1 otherwise.
        non_terminal = FloatTensor([0 if t else 1 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size, dtype=torch.long)

        # Current Q Values
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]

        # Calculate target
        if self.use_target_model:
            _, q_next = self.target_model.predict_batch(next_states)
        else:
            _, q_next = self.eval_model.predict_batch(next_states)
        max_q_next, _ = torch.max(q_next, dim=1)
        q_target = reward + self.beta * max_q_next * non_terminal
        # Update model
        self.eval_model.fit(q_values, q_target)

    def replace_target_model(self):
        if self.use_target_model and self.steps % self.model_replace_freq == 0:
            self.target_model.replace(self.eval_model)

    def evaluate_result(self):
        self.episode += 1
        if self.episode % self.test_interval == 0:
            self.save_model()

    def save_model(self):
        filename = "/best_model{0}.pt".format(self.eval_models_seq)
        self.eval_model.save(result_floder + filename)
        self.memory_server.add_evamodel_dir.remote(result_floder + filename)
        self.eval_models_seq += 1

    def ask_evaluate(self):
        if len(self.eval_models) == 0:
            return None, self.episode >= self.training_episodes

        eval_model, is_done = self.eval_models[0]
        del self.eval_models[0]
        return eval_model, is_done

    def get_collector_done(self):
        return self.episode >= self.training_episodes

    def linear_decrease(self, initial_value, final_value, curr_steps, final_decay_steps):
        decay_rate = curr_steps / final_decay_steps
        if decay_rate > 1:
            decay_rate = 1
        return initial_value - (initial_value - final_value) * decay_rate

    def explore_or_exploit_policy(self, state):
        self.epsilon = self.linear_decrease(self.initial_epsilon, 
                                            self.final_epsilon, 
                                            self.steps,
                                            self.epsilon_decay_steps)
        return randint(0, self.action_space - 1) if uniform(0, 1) < self.epsilon else self.greedy_policy(state)

    def greedy_policy(self, state):
        return self.eval_model.predict(state)

    def add_results(self, result):
        self.results.append(result)

    def get_results(self):
        return self.results

    def update_and_replace_model(self):
        self.steps += 1
        # Train every 'update_steps' steps (the original condition was inverted).
        if self.steps % self.update_steps == 0:
            self.update_batch()
        self.replace_target_model()